Example #1
def make_param_terminal_conditions(terminal_model_variables, ref_variables, xi_dict, model, options):
    terminal_states = terminal_model_variables

    awelogger.logger.info('Parameterizing terminal constraint...')
    xi_f = xi_dict['xi']['xi_f']
    terminal_splines = parameterization.get_splines(terminal_model_variables, xi_dict, 'terminal')

    xd_struct = model.variables_dict['xd']

    spline_list = []

    for i in range(xd_struct.cat.shape[0]):
        (state_name, state_dim) = xd_struct.getCanonicalIndex(i)
        spline_list += [terminal_splines[state_name + '_' + str(state_dim)](xi_f)]

    var_ref_terminal = xd_struct(cas.vertcat(*spline_list))

    # initialize lists
    terminal_conditions_eq_list = []
    black_list = []

    # remove black-listed variables (none here) from the set to be constrained
    variable_list = set(xd_struct.keys()) - set(black_list)

    # iterate over variables to construct constraints
    for variable in variable_list:
        terminal_conditions_eq_list += [terminal_states['xd', variable] - var_ref_terminal[variable] / model.scaling['xd'][variable]]
    terminal_conditions_eq = cas.vertcat(*terminal_conditions_eq_list)

    return terminal_conditions_eq
Example #2
def list_filament_observer_and_normal_info(point_obs,
                                           filament_list,
                                           options,
                                           n_hat=None):
    # join the filament list to the observation data

    n_filaments = filament_list.shape[1]

    epsilon = options['aero']['vortex']['epsilon']

    point_obs_extended = []
    for jdx in range(3):
        point_obs_extended = cas.vertcat(
            point_obs_extended,
            vect_op.ones_sx((1, n_filaments)) * point_obs[jdx])
    eps_extended = vect_op.ones_sx((1, n_filaments)) * epsilon

    seg_list = cas.vertcat(point_obs_extended, filament_list, eps_extended)

    if n_hat is not None:
        n_hat_ext = []
        for jdx in range(3):
            n_hat_ext = cas.vertcat(
                n_hat_ext,
                vect_op.ones_sx((1, n_filaments)) * n_hat[jdx])

        seg_list = cas.vertcat(seg_list, n_hat_ext)

    return seg_list
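
For context, the tiling loop above repeats each coordinate of the observer point across one row per filament column. A minimal standalone sketch of the same pattern in plain CasADi, assuming vect_op.ones_sx simply wraps cas.SX.ones:

import casadi as cas

point_obs = cas.SX.sym('point_obs', 3)  # observer position
n_filaments = 4

# stack one row per coordinate, each repeated across all filament columns
rows = [cas.SX.ones(1, n_filaments) * point_obs[jdx] for jdx in range(3)]
point_obs_extended = cas.vertcat(*rows)  # shape (3, n_filaments)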
Example #3
def plot_actuator_area_in_aerotime(solution_dict, cosmetics, fig_num, reload_dict):

    outputs = solution_dict['outputs']
    architecture = solution_dict['architecture']
    options = solution_dict['options']

    n_k = options['nlp']['n_k']

    fig = plt.figure(fig_num)

    layer_parents = architecture.layer_nodes

    for parent in layer_parents:
        tgrid_coll = reload_dict['tgrid_xa_aerotime' + str(parent)]

        area = []

        for kdx in range(n_k):
            area = cas.vertcat(area, cas.vertcat(*outputs['coll_outputs', kdx, :, 'actuator', 'area' + str(parent)]))

        area = np.array(area)

        avg_radius = reload_dict['avg_radius' + str(parent)]
        avg_area = np.pi * avg_radius**2.

        plt.plot(tgrid_coll, area / avg_area)

    plt.xlabel('t u_infty / bar R [-]')
    plt.ylabel('A / (pi bar R^2) [-]')

    plt.show()
Example #4
def merge_integral_output_values(int_out, name, plot_dict, cosmetics):

    # read in inputs
    discretization = plot_dict['discretization']
    if discretization == 'direct_collocation':
        # total time points
        tgrid_x_coll = plot_dict['time_grids']['x_coll']

    # interval time points
    tgrid_x = plot_dict['time_grids']['x']

    if discretization == 'multiple_shooting':
        # take interval values
        output_values = np.array(cas.vertcat(*int_out['int_out',:,name]).full())
        tgrid = tgrid_x

    elif discretization == 'direct_collocation':
        output_values = []
        # merge interval and node values
        for k in range(plot_dict['n_k']+1):
            # add interval values
            output_values = cas.vertcat(output_values, int_out['int_out',k, name])
            if (cosmetics['plot_coll'] and k < plot_dict['n_k']):
                # add node values
                output_values = cas.vertcat(output_values, cas.vertcat(*int_out['coll_int_out',k, :, name]))

        if cosmetics['plot_coll']:
            tgrid = tgrid_x_coll
        else:
            tgrid = tgrid_x

    # make list of time grid and values
    tgrid = list(chain.from_iterable(tgrid.full().tolist()))

    return output_values, tgrid
Example #5
def get_naca_shell(chord, naca="0012", center_at_quarter_chord = True):

    # NACA 4-digit code: max camber, camber position, thickness
    m = float(naca[0]) / 100.
    p = float(naca[1]) / 10.
    t = float(naca[2:]) / 100.

    s_list = np.arange(0., 101.) / 100.

    x_upper = []
    x_lower = []

    for s in s_list:
        [xu, xl, yu, yl] = get_naca_airfoil_coordinates(s, m, p, t)

        new_x_upper = xu * vect_op.xhat_np() + yu * vect_op.zhat_np()
        new_x_lower = xl * vect_op.xhat_np() + yl * vect_op.zhat_np()

        if center_at_quarter_chord:
            new_x_upper = new_x_upper - vect_op.xhat_np() / 4.
            new_x_lower = new_x_lower - vect_op.xhat_np() / 4.

        x_upper = cas.vertcat(x_upper, (chord * new_x_upper.T))
        x_lower = cas.vertcat(x_lower, (chord * new_x_lower.T))

    x_upper = np.array(x_upper)
    x_lower = np.array(x_lower)[::-1]

    x = np.array(cas.vertcat(x_lower, x_upper))

    return x
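
The helper get_naca_airfoil_coordinates is not shown here; for a 4-digit NACA code it presumably evaluates the standard camber and thickness formulas. As an assumption about what it computes, a minimal sketch of the symmetric half-thickness distribution (the only term that matters for a "0012" profile):

import numpy as np

def naca4_half_thickness(x, t):
    # standard NACA 4-digit half-thickness at chordwise position x in [0, 1]
    return 5. * t * (0.2969 * np.sqrt(x) - 0.1260 * x - 0.3516 * x**2
                     + 0.2843 * x**3 - 0.1015 * x**4)

# NACA 0012 (t = 0.12) at mid-chord
y_t = naca4_half_thickness(0.5, 0.12)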
Example #6
def get_orbit_cone_parameters(options, model, l_t):

    # get rotation plane axes:
    # zhat is along tether (out)
    # xhat is along earth-fixed yhat
    # yhat is up and out, back towards the wind

    ehat_tether = get_ehat_tether(options)
    ehat_side = vect_op.yhat()
    ehat_up = vect_op.normed_cross(ehat_tether, ehat_side)

    # get radius and height of the cones in use
    # two cone types are specified, based on the main tether (single-kite option)
    # and the secondary tether (multi-kite option);
    # radius depends on flight velocity, and height follows from hypotenuse and radius
    hypotenuse_list = cas.vertcat(l_t, options['theta']['l_s'])
    [radius, t_f_guess] = estimate_radius_and_flight_time(options, model)

    height_list = []
    for hdx in range(hypotenuse_list.shape[0]):
        hypotenuse = hypotenuse_list[hdx]
        height = (hypotenuse**2. - radius**2.)**0.5
        height_list = cas.vertcat(height_list, height)

    return height_list, radius, ehat_tether, ehat_side, ehat_up
Example #7
def list_filaments_kiteobs_and_normal_info(filament_list, options, variables,
                                           parameters, kite_obs, architecture,
                                           include_normal_info):

    n_filaments = filament_list.shape[1]

    parent_obs = architecture.parent_map[kite_obs]

    point_obs = variables['xd']['q' + str(kite_obs) + str(parent_obs)]

    seg_list = list_filament_observer_and_normal_info(point_obs, filament_list,
                                                      options)

    if include_normal_info:

        n_vec_val = unit_normal.get_n_vec(options, parent_obs, variables,
                                          parameters, architecture)
        n_hat = vect_op.normalize(n_vec_val)

        n_hat_ext = []
        for jdx in range(3):
            n_hat_ext = cas.vertcat(
                n_hat_ext,
                vect_op.ones_sx((1, n_filaments)) * n_hat[jdx])

        seg_list = cas.vertcat(seg_list, n_hat_ext)

    return seg_list
Example #8
    def __postprocess_sim(self):
        """ Postprocess simulation results.
        """

        # vectorize result lists for plotting
        for var_type in set(self.__trial.model.variables_dict.keys()) - set(
            ['theta', 'xddot']):
            for name in list(
                    self.__trial.model.variables_dict[var_type].keys()):
                for dim in range(self.__trial.model.variables_dict[var_type]
                                 [name].shape[0]):
                    self.__visualization.plot_dict[var_type][name][
                        dim] = ct.vertcat(*self.__visualization.
                                          plot_dict[var_type][name][dim])

        for output_type in list(self.__trial.model.outputs.keys()):
            for name in list(
                    self.__trial.model.outputs_dict[output_type].keys()):
                for dim in range(self.__trial.model.outputs_dict[output_type]
                                 [name].shape[0]):
                    self.__visualization.plot_dict['outputs'][output_type][
                        name][dim] = ct.vertcat(
                            *self.__visualization.plot_dict['outputs']
                            [output_type][name][dim]).full()

        for name in self.__visualization.plot_dict['integral_variables']:
            self.__visualization.plot_dict['integral_outputs'][name][
                0] = ct.vertcat(*self.__visualization.
                                plot_dict['integral_outputs'][name][0])
Example #9
def collect_type_constraints(nlp, is_equality):

    constraints = []
    list_names = []
    constraint_sym = []

    # list the evaluated constraints at solution
    g = nlp.g

    g_sym = cas.SX.sym('g_sym', g.shape)

    for gdx in range(g.shape[0]):
        cstr_name = g.getCanonicalIndex(gdx)

        condition = 'inequality' in cstr_name
        if is_equality:
            condition = not condition

        if condition:
            constraints = cas.vertcat(constraints, g.cat[gdx])
            constraint_sym = cas.vertcat(constraint_sym, g_sym[gdx])

            name_list_strings = list(map(str, cstr_name))
            name_list = [name + '_' for name in name_list_strings[:-1]
                         ] + [name_list_strings[-1]]
            list_names += [''.join(name_list)]

    cstr_fun = cas.Function('cstr_fun', [g_sym], [constraint_sym])

    return constraints, list_names, cstr_fun
Example #10
def get_radius_inequality(model_options, variables, kite, parent, parameters):
    # no projection included...

    b_ref = parameters['theta0', 'geometry', 'b_ref']
    half_span = b_ref / 2.
    num_ref = model_options['model_bounds']['anticollision_radius']['num_ref']

    # half_span - radius < 0
    # half_span * den - num < 0

    dq = variables['xd']['dq' + str(kite) + str(parent)]
    ddq = variables['xddot']['ddq' + str(kite) + str(parent)]

    gamma_dot = cas.vertcat(0., dq[1], dq[2])
    gamma_ddot = cas.vertcat(0., ddq[1], ddq[2])

    num = cas.mtimes(gamma_dot.T, gamma_dot)**2.

    den_vec = gamma_ddot * cas.mtimes(gamma_dot.T,
                                      gamma_dot) - gamma_dot * cas.mtimes(
                                          gamma_dot.T, gamma_ddot)
    den = vect_op.norm(den_vec)

    inequality = (half_span * den - num) / num_ref

    return inequality
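
The rearrangement in the comments follows from the turning radius of the projected path: with num = (gamma_dot . gamma_dot)**2 and den = |gamma_ddot (gamma_dot . gamma_dot) - gamma_dot (gamma_dot . gamma_ddot)|, the radius is num / den, so half_span < radius becomes half_span * den - num < 0 without dividing by a possibly tiny den. A quick numeric check of the radius identity on a circular path, where the answer is known (a sketch independent of the awebox helpers):

import numpy as np

R_true = 3.0
omega = 0.7

# point moving on a circle of radius R_true at angular rate omega
gamma_dot = np.array([0., -R_true * omega, 0.])        # tangential velocity
gamma_ddot = np.array([-R_true * omega**2., 0., 0.])   # centripetal acceleration

num = np.dot(gamma_dot, gamma_dot)**2.
den_vec = (gamma_ddot * np.dot(gamma_dot, gamma_dot)
           - gamma_dot * np.dot(gamma_dot, gamma_ddot))
den = np.linalg.norm(den_vec)

print(num / den)  # recovers R_true = 3.0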
Example #11
def get_induced_velocity_at_kite(model_options, variables, parameters,
                                 architecture, kite, outputs):

    lin_params = parameters['lin']

    var_sym = {}
    var_sym_cat = []
    var_actual_cat = []
    for var_type in variables.keys():
        var_sym[var_type] = variables[var_type](cas.SX.sym(
            var_type, (variables[var_type].cat.shape)))
        var_sym_cat = cas.vertcat(var_sym_cat, var_sym[var_type].cat)
        var_actual_cat = cas.vertcat(var_actual_cat, variables[var_type].cat)

    columnized_list = outputs['vortex']['filament_list']
    filament_list = vortex_filament_list.decolumnize(model_options,
                                                     architecture,
                                                     columnized_list)
    uind_sym = vortex_flow.get_induced_velocity_at_kite(
        model_options, filament_list, variables, architecture, kite)
    jac_sym = cas.jacobian(uind_sym, var_sym_cat)

    uind_fun = cas.Function('uind_fun', [var_sym_cat], [uind_sym])
    jac_fun = cas.Function('jac_fun', [var_sym_cat], [jac_sym])

    slope = jac_fun(lin_params)
    const = uind_fun(lin_params)

    uind_lin = cas.mtimes(slope, var_actual_cat) + const

    return uind_lin
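
The pattern above uses cas.jacobian to build a linear surrogate that is cheap to evaluate inside the optimization. The same CasADi machinery on a toy function, as a minimal sketch of a first-order expansion around a point x0 (not the awebox vortex model itself):

import casadi as cas

x = cas.SX.sym('x', 2)
f = cas.vertcat(x[0] * x[1], x[0]**2)  # toy nonlinear map

f_fun = cas.Function('f_fun', [x], [f])
jac_fun = cas.Function('jac_fun', [x], [cas.jacobian(f, x)])

x0 = cas.DM([1.0, 2.0])
slope = jac_fun(x0)
const = f_fun(x0)

# first-order expansion: f(x) ~= f(x0) + J(x0) (x - x0)
f_lin = cas.mtimes(slope, x - x0) + const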
Example #12
def generate_holonomic_scaling(options, architecture, variables, parameters):
    scaling = options['scaling']
    holonomic_scaling = []

    for n in range(1, architecture.number_of_nodes):
        seg_props = tether_aero.get_tether_segment_properties(options, architecture, variables, parameters, upper_node=n)

        scaling_length = seg_props['scaling_length']
        scaling_speed = seg_props['scaling_speed']
        scaling_acc = seg_props['scaling_acc']

        g_loc = scaling_length**2.
        gdot_loc = 2. * scaling_length * scaling_speed
        gddot_loc = 2. * scaling_length * scaling_acc + 2. * scaling_speed**2.

        loc_scaling = get_constraint_lhs(g_loc, gdot_loc, gddot_loc, parameters)
        holonomic_scaling = cas.vertcat(holonomic_scaling, loc_scaling)

    number_of_kites = len(architecture.kite_nodes)
    if number_of_kites > 1 and options['cross_tether']:
        for l in architecture.layer_nodes:
            layer_kites = architecture.kites_map[l]
            number_of_layer_kites = len(layer_kites)

            if number_of_layer_kites == 2:
                holonomic_scaling = cas.vertcat(holonomic_scaling, scaling['theta']['l_c'] ** 2)
            else:
                for kite in layer_kites:
                    holonomic_scaling = cas.vertcat(holonomic_scaling, scaling['theta']['l_c'] ** 2)

    return holonomic_scaling
Example #13
def collect_type_constraints(nlp, cstr_type):

    found_cstrs = []
    found_names = []
    found_syms = []

    # list the evaluated constraints at solution
    ocp_cstr_list = nlp.ocp_cstr_list

    name_list = ocp_cstr_list.get_name_list('all')

    g = nlp.g
    g_sym = cas.SX.sym('g_sym', g.shape)

    for cstr in ocp_cstr_list.get_list('all'):
        local_name = cstr.name
        if cstr.cstr_type == cstr_type:

            indices = [
                idx for idx, name in enumerate(name_list) if name == local_name
            ]

            for idx in indices:
                found_cstrs = cas.vertcat(found_cstrs, g[idx])
                found_syms = cas.vertcat(found_syms, g_sym[idx])
                found_names += [local_name]

    cstr_fun = cas.Function('cstr_fun', [g_sym], [found_syms])

    return found_cstrs, found_names, cstr_fun
Example #14
def get_control_frequency(nlp_options, model, V, outputs):

    if 'control_freq' not in list(outputs.keys()):
        outputs['control_freq'] = {}

    nk = nlp_options['n_k']

    for delta in struct_op.subkeys(model.variables, 'u'):
        if ('delta' in delta) and ('ddelta' not in delta):
            variable = V['u', 0, delta]

            for dim in range(variable.shape[0]):
                control_freq = vect_op.estimate_1d_frequency(
                    cas.vertcat(*V['u', :, delta, dim]),
                    dt=V['theta', 't_f'] / nk)
                outputs['control_freq'][delta + '_' + str(dim)] = control_freq

    for delta in struct_op.subkeys(model.variables, 'xd'):
        if ('delta' in delta) and ('ddelta' not in delta):
            variable = V['xd', 0, delta]

            for dim in range(variable.shape[0]):
                control_freq = vect_op.estimate_1d_frequency(
                    cas.vertcat(*V['xd', :, delta, dim]),
                    dt=V['theta', 't_f'] / nk)
                outputs['control_freq'][delta + '_' + str(dim)] = control_freq

    return outputs
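
vect_op.estimate_1d_frequency is not shown here. A plausible stand-in, offered purely as a hypothetical sketch (not the awebox implementation), estimates the dominant frequency from the FFT of the sampled signal:

import numpy as np

def estimate_dominant_frequency(signal, dt):
    # frequency of the largest non-DC peak in the spectrum of a 1-D signal
    signal = np.asarray(signal).flatten()
    spectrum = np.abs(np.fft.rfft(signal - np.mean(signal)))
    freqs = np.fft.rfftfreq(signal.size, d=dt)
    return freqs[np.argmax(spectrum)]

# 2 Hz sine sampled at 100 Hz
t = np.arange(0., 1., 0.01)
print(estimate_dominant_frequency(np.sin(2. * np.pi * 2. * t), dt=0.01))  # ~2.0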
Example #15
def make_param_initial_conditions(initial_model_variables, ref_variables, xi_dict, model,options):
    initial_states = initial_model_variables

    awelogger.logger.info('Parameterizing initial constraint...')
    xi_0 = xi_dict['xi']['xi_0']
    initial_splines = parameterization.get_splines(initial_model_variables, xi_dict, 'initial')

    xd_struct = model.variables_dict['xd']

    spline_list = []

    for i in range(xd_struct.cat.shape[0]):
        (state_name, state_dim) = xd_struct.getCanonicalIndex(i)
        spline_list += [initial_splines[state_name + '_' + str(state_dim)](xi_0)]

    var_ref_initial = xd_struct(cas.vertcat(*spline_list))

    # initialize lists
    initial_conditions_eq_list = []
    black_list = []

    # compute black list of variables that should not be constrained
    if options['trajectory']['type'] == 'compromised_landing' and options['compromised_landing']['emergency_scenario'][0] == 'structural_damages':
        broken_kite = options['compromised_landing']['emergency_scenario'][1]
        broken_parent = model.architecture.parent_map[broken_kite]
        black_list += ['coeff' + str(broken_kite) + str(broken_parent)]
    variable_list = set(xd_struct.keys()) - set(black_list)

    # iterate over variables to construct constraints
    for variable in variable_list:
        initial_conditions_eq_list += [initial_states['xd', variable] - var_ref_initial[variable] / model.scaling['xd'][variable]]
    initial_conditions_eq = cas.vertcat(*initial_conditions_eq_list)

    return initial_conditions_eq
Example #16
def get_gamma_extrema(plot_dict):
    n_k = plot_dict['n_k']
    d = plot_dict['d']
    kite_nodes = plot_dict['architecture'].kite_nodes
    wake_nodes = plot_dict['options']['model']['aero']['vortex']['wake_nodes']

    rings = wake_nodes - 1

    gamma_max = -1.e5
    gamma_min = 1.e5

    for kite in kite_nodes:
        for ring in range(rings):
            for ndx in range(n_k):
                for ddx in range(d):

                    gamma_name = 'wg' + '_' + str(kite) + '_' + str(ring)
                    var = plot_dict['V_plot']['coll_var', ndx, ddx, 'xl',
                                              gamma_name]

                    gamma_max = np.max(np.array(cas.vertcat(gamma_max, var)))
                    gamma_min = np.min(np.array(cas.vertcat(gamma_min, var)))

    # so that gamma = 0 vortex filaments will be drawn in white...
    gamma_max = np.max(np.array([gamma_max, -1. * gamma_min]))
    gamma_min = -1. * gamma_max

    return gamma_min, gamma_max
Example #17
def draw_lifting_surface(ax,
                         q,
                         r,
                         b_ref,
                         c_tipn,
                         c_root,
                         c_tipp,
                         kite_color,
                         side,
                         body_cross_sections_per_meter,
                         naca="0012"):

    r_dcm = np.array(cas.reshape(r, (3, 3)))

    num_spanwise = np.ceil(b_ref * body_cross_sections_per_meter / 2.)

    ypos = np.arange(-1. * num_spanwise, num_spanwise + 1.) / num_spanwise / 2.

    leading_edges = []
    trailing_edges = []

    for y in ypos:

        yloc = cas.mtimes(r_dcm, vect_op.yhat_np()) * y * b_ref

        s = np.abs(y) / 0.5  # 1 at tips and 0 at root
        if y < 0:
            c_local = c_root * (1. - s) + c_tipn * s
        else:
            c_local = c_root * (1. - s) + c_tipp * s

        basic_shell = get_naca_shell(c_local, naca)

        basic_leading_edge = basic_shell[np.argmin(basic_shell[:, 0]), :]
        basic_trailing_edge = basic_shell[np.argmax(basic_shell[:, 0]), :]

        new_leading_edge = q + yloc + np.array(
            cas.mtimes(r_dcm, basic_leading_edge.T))
        new_trailing_edge = q + yloc + np.array(
            cas.mtimes(r_dcm, basic_trailing_edge.T))

        leading_edges = cas.vertcat(leading_edges, new_leading_edge.T)
        trailing_edges = cas.vertcat(trailing_edges, new_trailing_edge.T)

        horizontal_shell = []
        for idx in range(basic_shell[:, 0].shape[0]):

            new_point = q + yloc + np.array(
                cas.mtimes(r_dcm, basic_shell[idx, :].T))

            horizontal_shell = cas.vertcat(horizontal_shell, new_point.T)
        horizontal_shell = np.array(horizontal_shell)

        make_side_plot(ax, horizontal_shell, side, kite_color)

    make_side_plot(ax, leading_edges, side, kite_color)
    make_side_plot(ax, trailing_edges, side, kite_color)

    return None
Example #18
    def __poly_coeffs(self):
        """Compute coefficients of interpolating polynomials and their derivatives
        """

        # discretization info
        nk = self.__n_k
        d = self.__d

        # choose collocation points
        tau_root = cas.vertcat(0.0, cas.collocation_points(d, self.__scheme))

        # coefficients of the collocation equation
        coeff_collocation = np.zeros((d + 1, d + 1))

        # coefficients of the continuity equation
        coeff_continuity = np.zeros(d + 1)

        # dimensionless time inside one control interval
        tau = cas.SX.sym('tau')

        # all collocation time points
        t = np.zeros((nk, d + 1))
        for k in range(nk):
            for j in range(d + 1):
                t[k, j] = (k + tau_root[j])

        # for all collocation points
        ls = []
        for j in range(d + 1):
            # construct lagrange polynomials to get the polynomial basis at the
            # collocation point
            l = 1
            for r in range(d + 1):
                if r != j:
                    l *= (tau - tau_root[r]) / (tau_root[j] - tau_root[r])
            lfcn = cas.Function('lfcn', [tau], [l])
            ls = cas.vertcat(ls, l)

            # evaluate the polynomial at the final time to get the coefficients of
            # the continuity equation
            coeff_continuity[j] = lfcn([1.0])

            # evaluate the time derivative of the polynomial at all collocation
            # points to get the coefficients of the collocation equation
            tfcn = cas.Function('lfcntan', [tau], [cas.jacobian(l, tau)])
            for r in range(d + 1):
                coeff_collocation[j][r] = tfcn(tau_root[r])

        # interpolating function for all polynomials
        lfcns = cas.Function('lfcns', [tau], [ls])

        self.__coeff_continuity = coeff_continuity
        self.__coeff_collocation = coeff_collocation
        self.__coeff_fun = lfcns

        return None
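
The Lagrange construction above relies on the defining property l_j(tau_root[r]) = 1 if r == j and 0 otherwise, which is what makes both coefficient matrices well defined. A minimal standalone check of that property with plain CasADi (a sketch outside the class, assuming the 'radau' scheme):

import casadi as cas

d = 3
tau_root = cas.vertcat(0.0, cas.collocation_points(d, 'radau'))
tau = cas.SX.sym('tau')

for j in range(d + 1):
    l = 1
    for r in range(d + 1):
        if r != j:
            l *= (tau - tau_root[r]) / (tau_root[j] - tau_root[r])
    lfcn = cas.Function('lfcn', [tau], [l])
    for r in range(d + 1):
        expected = 1.0 if r == j else 0.0
        assert abs(float(lfcn(tau_root[r])) - expected) < 1e-10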
Example #19
def merge_output_values(output_vals, output_type, output_name, dim, plot_dict, cosmetics):

    # read in inputs
    discretization = plot_dict['discretization']
    if discretization == 'direct_collocation':

        scheme = plot_dict['options']['nlp']['collocation']['scheme']
        tgrid_coll = plot_dict['time_grids']['coll']

        # total time points
        tgrid_u_coll = plot_dict['time_grids']['x_coll'][:-1]

    # interval time points
    tgrid_u = plot_dict['time_grids']['u']

    if discretization == 'multiple_shooting':
        # take interval values
        output_values = np.array(cas.vertcat(*output_vals['outputs',:,output_type,output_name,dim]).full())
        tgrid = tgrid_u

        ndim = output_vals['outputs',0,output_type,output_name].shape[0]

    elif discretization == 'direct_collocation':
        if scheme != 'radau':
            output_values = []
            # merge interval and node values
            for k in range(plot_dict['n_k']):
                # add interval values
                output_values = cas.vertcat(output_values, output_vals['outputs',k, output_type, output_name,dim])
                if cosmetics['plot_coll']:
                    # add node values
                    output_values = cas.vertcat(output_values, cas.vertcat(*output_vals['coll_outputs',k, :, output_type, output_name,dim]))

            if cosmetics['plot_coll']:
                tgrid = tgrid_u_coll
            else:
                tgrid = tgrid_u
            ndim = output_vals['outputs',0,output_type,output_name].shape[0]

        else:
            if cosmetics['plot_coll']:
                # add only node values for radau case
                output_values = np.array(struct_op.coll_slice_to_vec(output_vals['coll_outputs',:,:,output_type,output_name,dim]))
                tgrid = tgrid_coll
                ndim = output_vals['coll_outputs',0,0,output_type,output_name].shape[0]
            else:
                output_values = []
                tgrid = []
                ndim = 1


    # make list of time grid and values
    tgrid = list(chain.from_iterable(tgrid.full().tolist()))
    output_values = list(chain.from_iterable(output_values))

    return output_values, tgrid, ndim
Example #20
    def __integrate_integral_outputs(self, Integral_outputs_list,
                                     integral_outputs_deriv, model, tf):

        # number of integral outputs
        ni = model.integral_outputs.cat.shape[0]

        if ni > 0:

            # constant term
            i0 = model.integral_outputs(
                cas.vertcat(*Integral_outputs_list)[-ni:])

            # evaluate derivative functions
            derivative_list = []
            for i in range(self.__d):
                derivative_list += [
                    model.integral_outputs(integral_outputs_deriv[:, i])
                ]

            integral_output = OrderedDict()
            # integrate using collocation
            for name in list(model.integral_outputs.keys()):

                # get derivatives
                derivatives = []
                for i in range(len(derivative_list)):
                    derivatives.append(derivative_list[i][name])

                # compute state values at collocation nodes
                integral_output[name] = tf / self.__n_k * cas.mtimes(
                    self.__Lambda.T, cas.vertcat(*derivatives))

                # compute state value at end of collocation interval
                integral_output_continuity = 0.0

                for j in range(self.__d):
                    integral_output_continuity += self.__coeff_continuity[
                        j + 1] * integral_output[name][j]

                integral_output[name] = cas.vertcat(
                    integral_output[name], integral_output_continuity)

                # add constant term
                integral_output[name] += i0[name]

            # build Integral_outputs_list
            for i in range(integral_output[list(
                    integral_output.keys())[0]].shape[0]):
                for name in list(model.integral_outputs.keys()):
                    Integral_outputs_list.append(integral_output[name][i])

        else:
            # no integral outputs to integrate
            pass

        return Integral_outputs_list
Example #21
def create_constraint_outputs(g_list, g_bounds, g_struct, V, P):

    g = g_struct(cas.vertcat(*g_list))
    g_fun = cas.Function('g_fun',[V, P], [g.cat])
    g_jacobian_fun = cas.Function('g_jacobian_fun',[V,P],[g.cat, cas.jacobian(g.cat, V.cat)])

    g_bounds['lb'] = cas.vertcat(*g_bounds['lb'])
    g_bounds['ub'] = cas.vertcat(*g_bounds['ub'])

    return g, g_fun, g_jacobian_fun, g_bounds
Example #22
def coll_slice_to_vec(coll_slice):

    coll_list = []

    for i in range(len(coll_slice)):
        coll_list.append(cas.vertcat(*coll_slice[i]))

    coll_vec = cas.vertcat(*coll_list)

    return coll_vec
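
As a usage illustration, coll_slice_to_vec flattens a per-interval list of collocation values into one column vector. A minimal sketch with hand-built CasADi values in place of a struct slice:

import casadi as cas

# two intervals with two collocation values each
coll_slice = [[cas.DM(1.), cas.DM(2.)], [cas.DM(3.), cas.DM(4.)]]
coll_vec = coll_slice_to_vec(coll_slice)  # 4x1 column: [1, 2, 3, 4]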
Example #23
def merge_xd_values(V, name, dim, plot_dict, cosmetics):

    # read in inputs

    discretization = plot_dict['discretization']
    if discretization == 'direct_collocation':
        scheme = plot_dict['options']['nlp']['collocation']['scheme']
        tgrid_coll = plot_dict['time_grids']['coll']

        # total time points
        tgrid_x_coll = plot_dict['time_grids']['x_coll']

    # interval time points
    tgrid_x = plot_dict['time_grids']['x']

    if discretization == 'multiple_shooting':
        # take interval values
        xd_values = np.array(cas.vertcat(*V['xd', :, name, dim]).full())
        tgrid = tgrid_x

    elif discretization == 'direct_collocation':
        if scheme != 'radau':
            xd_values = []
            # merge interval and node values
            for k in range(plot_dict['n_k'] + 1):
                # add interval values
                xd_values = cas.vertcat(xd_values, V['xd', k, name, dim])
                if (cosmetics['plot_coll'] and k < plot_dict['n_k']):
                    # add node values
                    xd_values = cas.vertcat(
                        xd_values,
                        cas.vertcat(*V['coll_var', k, :, 'xd', name,
                                       dim]).full())

            if cosmetics['plot_coll']:
                tgrid = tgrid_x_coll
            else:
                tgrid = tgrid_x

        elif scheme == 'radau':
            if cosmetics['plot_coll']:
                # add node values
                xd_values = np.array(
                    struct_op.coll_slice_to_vec(V['coll_var', :, :, 'xd', name,
                                                  dim]))
                tgrid = tgrid_coll
            else:
                xd_values = []
                tgrid = []

    # make list of time grid and values
    tgrid = list(chain.from_iterable(tgrid.full().tolist()))
    xd_values = list(chain.from_iterable(xd_values))

    return xd_values, tgrid
Example #24
def get_q_extrema_in_dimension(dim, plot_dict, cosmetics):

    temp_min = 1.e5
    temp_max = -1.e5

    if dim == 'x' or dim == '0':
        jdx = 0
        dim = 'x'
    elif dim == 'y' or dim == '1':
        jdx = 1
        dim = 'y'
    elif dim == 'z' or dim == '2':
        jdx = 2
        dim = 'z'
    else:
        jdx = 0
        dim = 'x'

        message = 'selected dimension for q_limits not supported. setting dimension to x'
        awelogger.logger.warning(message)

    for name in list(plot_dict['xd'].keys()):
        if name[0] == 'q':
            temp_min = np.min(
                cas.vertcat(temp_min, np.min(plot_dict['xd'][name][jdx])))
            temp_max = np.max(
                cas.vertcat(temp_max, np.max(plot_dict['xd'][name][jdx])))

        if name[0] == 'w' and name[1] == dim and cosmetics['trajectory'][
                'wake_nodes']:
            vals = np.array(cas.vertcat(
                *plot_dict['xd'][name])) * vortex_tools.get_position_scale(
                    plot_dict['options']['model'])
            temp_min = np.min(cas.vertcat(temp_min, np.min(vals)))
            temp_max = np.max(cas.vertcat(temp_max, np.max(vals)))

    # get margins
    margin = cosmetics['trajectory']['margin']
    lmargin = 1.0 - margin
    umargin = 1.0 + margin

    if temp_min > 0.0:
        temp_min = lmargin * temp_min
    else:
        temp_min = umargin * temp_min

    if temp_max < 0.0:
        temp_max = lmargin * temp_max
    else:
        temp_max = umargin * temp_max

    q_lim = [temp_min, temp_max]

    return q_lim
Example #25
    def __compute_time_grids(self, index):
        """ Compute NLP time grids based in periodic index
        """

        Tref = self.__ref_dict['time_grids']['ip'][-1]
        t_grid = self.__t_grid_coll + index*self.__ts
        t_grid = ct.vertcat(*list(map(lambda x: x % Tref, t_grid))).full().squeeze()

        t_grid_x = self.__t_grid_x_coll + index*self.__ts
        t_grid_x = ct.vertcat(*list(map(lambda x: x % Tref, t_grid_x))).full().squeeze()

        return t_grid, t_grid_x
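
The modulo map above wraps the shifted grids back onto one reference period, so the tracking reference repeats cleanly. The same wrapping in isolation, as a sketch with plain numpy in place of the class attributes:

import numpy as np

Tref = 10.0                                 # reference period
t_grid = np.array([8.0, 9.5, 11.0, 12.5])   # shifted grid, partly past one period
t_wrapped = np.mod(t_grid, Tref)            # -> [8.0, 9.5, 1.0, 2.5]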
Example #26
def get_final_residual(options, atmos, wind, variables, parameters, outputs,
                       architecture):
    resi = []

    ind_resi = get_induction_final_residual(options, atmos, wind, variables,
                                            parameters, outputs, architecture)
    resi = cas.vertcat(resi, ind_resi)

    spec_resi = get_specific_residuals(options, atmos, wind, variables,
                                       parameters, outputs, architecture)
    resi = cas.vertcat(resi, spec_resi)

    return resi
Example #27
def collect_active_inequality_constraints(health_solver_options, nlp, solution,
                                          p_fix_num):

    active_threshold = health_solver_options['thresh']['active']
    v_vals = solution['x']

    active_constraints = []
    list_names = []
    active_sym = []

    [g_ineq, g_ineq_names, ineq_fun] = collect_inequality_constraints(nlp)

    # list the evaluated constraints at solution
    ocp_cstr_list = nlp.ocp_cstr_list

    g = nlp.g
    g_fun = nlp.g_fun
    g_vals = g_fun(v_vals, p_fix_num)
    g_sym = cas.SX.sym('g_sym', g.shape)
    g_names = ocp_cstr_list.get_name_list('all')

    # list the multipliers lambda at solution
    lam_vals = solution['lam_g']

    g_ineq_vals = ineq_fun(g_vals)
    lambda_ineq_vals = ineq_fun(lam_vals)
    g_ineq_sym = ineq_fun(g_sym)

    if not g_ineq_sym.shape[0] == 0:
        for gdx in range(g_ineq.shape[0]):

            local_g = g_ineq_vals[gdx]
            local_lam = lambda_ineq_vals[gdx]
            local_name = g_ineq_names[gdx]

            # the constraint is considered active when its multiplier dominates
            # the constraint value, i.e. when lambda**2 > (threshold * g)**2
            if local_lam**2. > (active_threshold * local_g)**2.:

                # append active constraints to active_list
                active_constraints = cas.vertcat(active_constraints, local_g)

                list_names += [local_name]
                active_sym = cas.vertcat(active_sym, g_ineq_sym[gdx])

    active_fun = cas.Function('active_fun', [g_sym], [active_sym])

    # return active_list
    return active_constraints, list_names, active_fun
Example #28
def collect_equality_and_active_inequality_constraints(health_solver_options,
                                                       nlp, solution, arg):

    var_sym = cas.SX.sym('var_sym', nlp.V.shape)
    p_sym = cas.SX.sym('p_sym', nlp.P.shape)
    ubx_sym = cas.SX.sym('ubx_sym', arg['ubx'].shape)
    lbx_sym = cas.SX.sym('lbx_sym', arg['lbx'].shape)

    lam_x_sym = cas.SX.sym('lam_x_sym', solution['lam_x'].shape)
    lam_g_sym = cas.SX.sym('lam_g_sym', solution['lam_g'].shape)

    p_fix_num = nlp.P(arg['p'])
    var_constraint_functions = collect_var_constraints(health_solver_options,
                                                       nlp, arg, solution)

    [equality_constraints, eq_labels,
     eq_fun] = collect_equality_constraints(nlp)
    [active_inequality_constraints, active_ineq_labels, active_fun
     ] = collect_active_inequality_constraints(health_solver_options, nlp,
                                               solution, p_fix_num)

    equality_constraints = eq_fun(nlp.g_fun(var_sym, p_sym))
    active_inequality_constraints = active_fun(nlp.g_fun(var_sym, p_sym))

    equality_lambdas = eq_fun(lam_g_sym)
    active_inequality_lambdas = active_fun(lam_g_sym)

    all_active_var_bounds = var_constraint_functions['all_act_fun'](var_sym,
                                                                    lbx_sym,
                                                                    ubx_sym)
    all_active_var_lambdas = var_constraint_functions['all_act_lam_fun'](
        lam_x_sym)
    all_active_var_labels = var_constraint_functions['all_act_labels']

    stacked_constraints = cas.vertcat(equality_constraints,
                                      active_inequality_constraints,
                                      all_active_var_bounds)
    stacked_cstr_fun = cas.Function('stacked_cstr_fun',
                                    [var_sym, p_sym, lbx_sym, ubx_sym],
                                    [stacked_constraints])

    stacked_lambdas = cas.vertcat(equality_lambdas, active_inequality_lambdas,
                                  all_active_var_lambdas)
    stacked_lam_fun = cas.Function('stacked_lam_fun', [lam_x_sym, lam_g_sym],
                                   [stacked_lambdas])

    stacked_labels = eq_labels + active_ineq_labels + all_active_var_labels

    return stacked_cstr_fun, stacked_lam_fun, stacked_labels
Example #29
    def get_reference(self, t_grid, t_grid_x):
        """ Interpolate reference on NLP time grids.
        """

        ip_dict = {}
        V_ref = self.__trial.nlp.V(0.0)
        for var_type in ['xd','u','xa']:
            ip_dict[var_type] = []
            for name in list(self.__trial.model.variables_dict[var_type].keys()):
                for dim in range(self.__trial.model.variables_dict[var_type][name].shape[0]):
                    if var_type == 'xd':
                        ip_dict[var_type].append(self.__interpolator(t_grid_x, name, dim,var_type))
                    else:
                        ip_dict[var_type].append(self.__interpolator(t_grid, name, dim,var_type))
            if self.__mpc_options['ref_interpolator'] == 'poly':
                ip_dict[var_type] = ct.horzcat(*ip_dict[var_type]).T
            elif self.__mpc_options['ref_interpolator'] == 'spline':
                ip_dict[var_type] = ct.vertcat(*ip_dict[var_type])

        counter = 0
        counter_x = 0
        V_list = []
        for k in range(self.__N):
            for j in range(self.__trial.nlp.d+1):
                if j == 0:
                    V_list.append(ip_dict['xd'][:,counter_x])
                    counter_x += 1
                else:
                    for var_type in ['xd','xa','u']:
                        if var_type == 'xd':
                            V_list.append(ip_dict[var_type][:,counter_x])
                            counter_x += 1
                        else:
                            V_list.append(ip_dict[var_type][:,counter])
                    counter += 1

        V_list.append(ip_dict['xd'][:,counter_x])

        for name in self.__trial.model.variables_dict['theta'].keys():
            if name != 't_f':
                V_list.append(self.__pocp_trial.optimization.V_opt['theta',name])
            else:
                V_list.append(self.__N*self.__ts)

        for var_type in ['phi', 'xi']:
            V_list.append(np.zeros(self.__trial.nlp.V[var_type].shape))
        V_ref = self.__trial.nlp.V(ct.vertcat(*V_list))

        return V_ref
Example #30
    def __set_implicit_variables(self, options, variables, parameters,
                                 z_at_time):
        """Set non-lifted implicit variables xa, xl and xddot to value computed
        using rootfinder

        @param options nlp options
        @param variables vars at a specific time
        @param parameters params at a specific time
        @param z_at_time alg vars computed with rootfinder
        @return variables variables struct containing implicit variable values
        """

        if (not options['lift_xddot'] or not options['lift_xa']):

            # fill in result if not lifted
            var_list = []
            for var_type in list(variables.keys()):
                if var_type == 'xddot':
                    if not options['lift_xddot']:
                        var_list.append(z_at_time['xddot'])
                    else:
                        var_list.append(variables['xddot'])
                elif var_type in set(['xa', 'xl']):
                    if not options['lift_xa']:
                        var_list.append(z_at_time[var_type])
                    else:
                        var_list.append(variables[var_type])
                else:
                    var_list.append(variables[var_type])

            variables = variables(cas.vertcat(*var_list))

        return variables