Example #1
def gen_solver_input(x0, u_prev, ref, ud_prev, disturbance):
    # TODO: Allow disturbance to be None?
    return ca.vertcat(
        ca.vec(x0),
        ca.vertcat(
            ca.vec(u_prev),
            ca.vertcat(ca.vec(ref),
                       ca.vertcat(ca.vec(ud_prev), ca.vec(disturbance)))))
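
A minimal usage sketch of the helper above (the problem dimensions and values are hypothetical, and casadi is assumed to be imported as ca):

import casadi as ca

x0 = ca.DM([0.1, 0.2])           # current state (2 states assumed)
u_prev = ca.DM([0.0])            # previous control input
ref = ca.DM([[1.0, 1.0, 1.0],    # reference trajectory, one column per step
             [0.0, 0.0, 0.0]])
ud_prev = ca.DM([0.0])           # previous disturbance-channel input
disturbance = ca.DM([0.05])      # measured disturbance (None is not handled yet, per the TODO)

p = gen_solver_input(x0, u_prev, ref, ud_prev, disturbance)
print(p.shape)                   # (11, 1): every argument is vectorized and stacked into one column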
Example #2
File: ad.py Project: ml-lab/casadi
    def test_MXevalSX(self):
        n = array([1.2, 2.3, 7, 1.4])
        for inputshape in ["column", "row", "matrix"]:
            for outputshape in ["column", "row", "matrix"]:
                for inputtype in ["dense", "sparse"]:
                    for outputtype in ["dense", "sparse"]:
                        self.message(
                            "evalSX on MX. Input %s %s, Output %s %s" %
                            (inputtype, inputshape, outputtype, outputshape))
                        f = MXFunction(
                            "f", self.mxinputs[inputshape][inputtype],
                            self.mxoutputs[outputshape][outputtype](
                                self.mxinputs[inputshape][inputtype][0]))
                        f.setInput(n)
                        f.evaluate()
                        r = f.getOutput()
                        J = self.jacobians[inputtype][outputtype](*n)

                        seeds = [[1, 0, 0, 0], [0, 2, 0, 0],
                                 [1.2, 4.8, 7.9, 4.6]]

                        y = SX.sym("y", f.getInput().sparsity())

                        fseeds = map(
                            lambda x: DMatrix(f.getInput().sparsity(), x),
                            seeds)
                        aseeds = map(
                            lambda x: DMatrix(f.getOutput().sparsity(), x),
                            seeds)
                        with internalAPI():
                            res = f.call([y])
                            fwdsens = f.callForward([y], res,
                                                    map(lambda x: [x], fseeds))
                            adjsens = f.callReverse([y], res,
                                                    map(lambda x: [x], aseeds))
                        fwdsens = map(lambda x: x[0], fwdsens)
                        adjsens = map(lambda x: x[0], adjsens)

                        fe = SXFunction("fe", [y], res)

                        fe.setInput(n)
                        fe.evaluate()

                        self.checkarray(r, fe.getOutput())

                        for sens, seed in zip(fwdsens, fseeds):
                            fe = SXFunction("fe", [y], [sens])
                            fe.setInput(n)
                            fe.evaluate()
                            self.checkarray(c.vec(fe.getOutput().T),
                                            mul(J, c.vec(seed.T)), "AD")

                        for sens, seed in zip(adjsens, aseeds):
                            fe = SXFunction("fe", [y], [sens])
                            fe.setInput(n)
                            fe.evaluate()
                            self.checkarray(c.vec(fe.getOutput().T),
                                            mul(J.T, c.vec(seed.T)), "AD")
Example #3
File: ad.py Project: meduardov/casadi
    def test_MXeval_sx(self):
        n = array([1.2, 2.3, 7, 1.4])
        for inputshape in ["column", "row", "matrix"]:
            for outputshape in ["column", "row", "matrix"]:
                for inputtype in ["dense", "sparse"]:
                    for outputtype in ["dense", "sparse"]:
                        self.message(
                            "eval_sx on MX. Input %s %s, Output %s %s" %
                            (inputtype, inputshape, outputtype, outputshape))
                        f = Function(
                            "f", self.mxinputs[inputshape][inputtype],
                            self.mxoutputs[outputshape][outputtype](
                                self.mxinputs[inputshape][inputtype][0]))
                        f_in = [0] * f.n_in()
                        f_in[0] = n
                        f_out = f(f_in)
                        r = f_out[0]
                        J = self.jacobians[inputtype][outputtype](*n)

                        seeds = [[1, 0, 0, 0], [0, 2, 0, 0],
                                 [1.2, 4.8, 7.9, 4.6]]

                        y = SX.sym("y", f.sparsity_in(0))

                        fseeds = map(lambda x: DM(f.sparsity_in(0), x), seeds)
                        aseeds = map(lambda x: DM(f.sparsity_out(0), x), seeds)
                        res = f(y)
                        fwdsens = f.forward([y], [res],
                                            map(lambda x: [x], fseeds))
                        adjsens = f.reverse([y], [res],
                                            map(lambda x: [x], aseeds))
                        fwdsens = map(lambda x: x[0], fwdsens)
                        adjsens = map(lambda x: x[0], adjsens)

                        fe = Function("fe", [y], [res])

                        fe_in = [0] * fe.n_in()
                        fe_in[0] = n
                        fe_out = fe(fe_in)

                        self.checkarray(r, fe_out[0])

                        for sens, seed in zip(fwdsens, fseeds):
                            fe = Function("fe", [y], [sens])
                            fe_in = [0] * fe.n_in()
                            fe_in[0] = n
                            fe_out = fe(fe_in)
                            self.checkarray(c.vec(fe_out[0].T),
                                            mtimes(J, c.vec(seed.T)), "AD")

                        for sens, seed in zip(adjsens, aseeds):
                            fe = Function("fe", [y], [sens])
                            fe_in = [0] * fe.n_in()
                            fe_in[0] = n
                            fe_out = fe(fe_in)
                            self.checkarray(c.vec(fe_out[0].T),
                                            mtimes(J.T, c.vec(seed.T)), "AD")
Example #4
File: ad.py Project: casadi/casadi
    def test_MXeval_sx(self):
        n = array([1.2, 2.3, 7, 1.4])
        for inputshape in ["column", "row", "matrix"]:
            for outputshape in ["column", "row", "matrix"]:
                for inputtype in ["dense", "sparse"]:
                    for outputtype in ["dense", "sparse"]:
                        self.message(
                            "eval_sx on MX. Input %s %s, Output %s %s"
                            % (inputtype, inputshape, outputtype, outputshape)
                        )
                        f = Function(
                            "f",
                            self.mxinputs[inputshape][inputtype],
                            self.mxoutputs[outputshape][outputtype](self.mxinputs[inputshape][inputtype][0]),
                        )
                        f_in = [0] * f.n_in()
                        f_in[0] = n
                        f_out = f(f_in)
                        r = f_out[0]
                        J = self.jacobians[inputtype][outputtype](*n)

                        seeds = [[1, 0, 0, 0], [0, 2, 0, 0], [1.2, 4.8, 7.9, 4.6]]

                        y = SX.sym("y", f.sparsity_in(0))

                        fseeds = [DM(f.sparsity_in(0), x) for x in seeds]
                        aseeds = [DM(f.sparsity_out(0), x) for x in seeds]
                        res = f(y)
                        fwdsens = f.forward([y], [res], [[x] for x in fseeds])
                        adjsens = f.reverse([y], [res], [[x] for x in aseeds])
                        fwdsens = [x[0] for x in fwdsens]
                        adjsens = [x[0] for x in adjsens]

                        fe = Function("fe", [y], [res])

                        fe_in = [0] * fe.n_in()
                        fe_in[0] = n
                        fe_out = fe(fe_in)

                        self.checkarray(r, fe_out[0])

                        for sens, seed in zip(fwdsens, fseeds):
                            fe = Function("fe", [y], [sens])
                            fe_in = [0] * fe.n_in()
                            fe_in[0] = n
                            fe_out = fe(fe_in)
                            self.checkarray(c.vec(fe_out[0].T), mtimes(J, c.vec(seed.T)), "AD")

                        for sens, seed in zip(adjsens, aseeds):
                            fe = Function("fe", [y], [sens])
                            fe_in = [0] * fe.n_in()
                            fe_in[0] = n
                            fe_out = fe(fe_in)
                            self.checkarray(c.vec(fe_out[0].T), mtimes(J.T, c.vec(seed.T)), "AD")
Example #5
File: ad.py Project: ml-lab/casadi
    def test_MXevalMX(self):
        n = array([1.2, 2.3, 7, 1.4])
        for inputshape in ["column", "row", "matrix"]:
            for outputshape in ["column", "row", "matrix"]:
                for inputtype in ["dense", "sparse"]:
                    for outputtype in ["dense", "sparse"]:
                        self.message(
                            "evalMX on MX. Input %s %s, Output %s %s" %
                            (inputtype, inputshape, outputtype, outputshape))
                        f = MXFunction(
                            "f", self.mxinputs[inputshape][inputtype],
                            self.mxoutputs[outputshape][outputtype](
                                self.mxinputs[inputshape][inputtype][0]))
                        f_in = DMatrix(f.inputSparsity(), n)
                        [r] = f([f_in])
                        J = self.jacobians[inputtype][outputtype](*n)

                        seeds = [[1, 0, 0, 0], [0, 2, 0, 0],
                                 [1.2, 4.8, 7.9, 4.6]]

                        y = MX.sym("y", f.inputSparsity())

                        fseeds = map(lambda x: DMatrix(f.inputSparsity(), x),
                                     seeds)
                        aseeds = map(lambda x: DMatrix(f.outputSparsity(), x),
                                     seeds)
                        with internalAPI():
                            res = f.call([y])
                            fwdsens = f.callForward([y], res,
                                                    map(lambda x: [x], fseeds))
                            adjsens = f.callReverse([y], res,
                                                    map(lambda x: [x], aseeds))
                        fwdsens = map(lambda x: x[0], fwdsens)
                        adjsens = map(lambda x: x[0], adjsens)

                        fe = MXFunction('fe', [y], res)

                        [re] = fe([f_in])

                        self.checkarray(r, re)

                        for sens, seed in zip(fwdsens, fseeds):
                            fe = MXFunction("fe", [y], [sens])
                            [re] = fe([f_in])
                            self.checkarray(c.vec(re), mul(J, c.vec(seed)),
                                            "AD")

                        for sens, seed in zip(adjsens, aseeds):
                            fe = MXFunction("fe", [y], [sens])
                            [re] = fe([f_in])
                            self.checkarray(c.vec(re), mul(J.T, c.vec(seed)),
                                            "AD")
Example #6
    def solve(self):
        """
        define extra variables

        homotopy parameters >= 0!!
        """
        if not self.prob['s']:
            self.set_grid()
        N = self.options['N']
        Nc = self.options['Nc']
        self.prob['vars'] = [cas.ssym("b", N + 1, self.sys.order),
                             cas.ssym("h", Nc, self.h.size1())]
        # Vectorize variables
        V = cas.vertcat([
            cas.vec(self.prob['vars'][0]),
            cas.vec(self.prob['vars'][1])
            ])
        self._make_constraints()
        self._make_objective()
        self._h_init()
        con = cas.SXFunction([V], [self.prob['con'][0]])
        obj = cas.SXFunction([V], [self.prob['obj']])
        if self.options.get('solver') == 'Ipopt':
            solver = cas.IpoptSolver(obj, con)
        else:
            print """Other solver than Ipopt are currently not supported,
            switching to Ipopt"""
            solver = cas.IpoptSolver(obj, con)
        for option, value in self.options.iteritems():
            if solver.hasOption(option):
                solver.setOption(option, value)
        solver.init()
        # Setting constraints
        solver.setInput(cas.vertcat(self.prob['con'][1]), "lbg")
        solver.setInput(cas.vertcat(self.prob['con'][2]), "ubg")
        solver.setInput(
            cas.vertcat([
                [np.inf] * self.sys.order * (N + 1),
                [1] * self.h.size1() * Nc
                ]), "ubx"
            )
        solver.setInput(
            cas.vertcat([
                [0] * (N + 1),
                [-np.inf] * (self.sys.order - 1) * (N + 1),
                [0] * self.h.size1() * Nc
                ]), "lbx"
            )
        solver.solve()
        self.prob['solver'] = solver
        self._get_solution()
Example #7
    def assert_model_equivalent_numeric(self, A, B, tol=1e-9):
        self.assertEqual(len(A.states), len(B.states))
        self.assertEqual(len(A.der_states), len(B.der_states))
        self.assertEqual(len(A.inputs), len(B.inputs))
        self.assertEqual(len(A.outputs), len(B.outputs))
        self.assertEqual(len(A.constants), len(B.constants))
        self.assertEqual(len(A.constant_values), len(B.constant_values))
        self.assertEqual(len(A.parameters), len(B.parameters))
        self.assertEqual(len(A.equations), len(B.equations))

        for a, b in zip(A.constant_values, B.constant_values):
            delta = ca.vec(a - b)
            for i in range(delta.size1()):
                test = float(delta[i]) <= tol
                self.assertTrue(test)

        this = A.get_function()
        that = B.get_function()

        this_mx = this.mx_in()
        that_mx = that.mx_in()
        this_in = [e.name() for e in this_mx]
        that_in = [e.name() for e in that_mx]

        that_from_this = []
        this_mx_dict = dict(zip(this_in, this_mx))
        for e in that_in:
            self.assertTrue(e in this_in)
            that_from_this.append(this_mx_dict[e])
        that = ca.Function('f', this_mx, that.call(that_from_this))

        np.random.seed(0)

        args_in = []
        for i in range(this.n_in()):
            sp = this.sparsity_in(i)
            r = ca.DM(sp, np.random.random(sp.nnz()))
            args_in.append(r)

        this_out = this.call(args_in)
        that_out = that.call(args_in)

        for i, (a, b) in enumerate(zip(this_out, that_out)):
            test = float(ca.norm_2(ca.vec(a - b))) <= tol
            if not test:
                print("Expr mismatch")
                print("A: ", A.equations[i], a)
                print("B: ", B.equations[i], b)
            self.assertTrue(test)

        return True
Example #8
File: ad.py Project: mzanon/casadi
  def test_MXevalSX(self):
    n=array([1.2,2.3,7,1.4])
    for inputshape in ["column","row","matrix"]:
      for outputshape in ["column","row","matrix"]:
        for inputtype in ["dense","sparse"]:
          for outputtype in ["dense","sparse"]:
            self.message("evalSX on MX. Input %s %s, Output %s %s" % (inputtype,inputshape,outputtype,outputshape) )
            f=MXFunction(self.mxinputs[inputshape][inputtype],self.mxoutputs[outputshape][outputtype](self.mxinputs[inputshape][inputtype][0]))
            f.init()
            f.setInput(n)
            f.evaluate()
            r = f.getOutput()
            J = self.jacobians[inputtype][outputtype](*n)
            
            seeds = [[1,0,0,0],[0,2,0,0],[1.2,4.8,7.9,4.6]]
            
            y = SX.sym("y",f.input().sparsity())
            
            fseeds = map(lambda x: DMatrix(f.input().sparsity(),x), seeds)
            aseeds = map(lambda x: DMatrix(f.output().sparsity(),x), seeds)
            with internalAPI():
              res = f.call([y])
              fwdsens = f.callForward([y],res,map(lambda x: [x],fseeds))
              adjsens = f.callReverse([y],res,map(lambda x: [x],aseeds))
            fwdsens = map(lambda x: x[0],fwdsens)
            adjsens = map(lambda x: x[0],adjsens)
            
            fe = SXFunction([y],res)
            fe.init()
            
            fe.setInput(n)
            fe.evaluate()
            
            self.checkarray(r,fe.getOutput())
            
            for sens,seed in zip(fwdsens,fseeds):
              fe = SXFunction([y],[sens])
              fe.init()
              fe.setInput(n)
              fe.evaluate()
              self.checkarray(c.vec(fe.getOutput().T),mul(J,c.vec(seed.T)),"AD") 

            for sens,seed in zip(adjsens,aseeds):
              fe = SXFunction([y],[sens])
              fe.init()
              fe.setInput(n)
              fe.evaluate()
              self.checkarray(c.vec(fe.getOutput().T),mul(J.T,c.vec(seed.T)),"AD")
Example #9
    def estimate(self, t_k, y_k, u_k):
        if not self._checked:
            self._check()
            self._checked = True
        if not self._types_fixed:
            self._fix_types()
            self._types_fixed = True

        x_mean = self.x_mean
        x_cov = self.p_k

        (x_hat_k_minus, p_k_minus, y_hat_k_minus, p_yk_yk,
         k_gain) = self._priori_update(x_mean,
                                       x_cov,
                                       u=u_k,
                                       p=self.p,
                                       theta=self.theta)

        x_hat_k = x_hat_k_minus + mtimes(k_gain, (y_k - y_hat_k_minus))
        p_k = p_k_minus - mtimes(k_gain, mtimes(p_yk_yk, k_gain.T))

        self.x_mean = x_hat_k
        self.p_k = p_k

        self.dataset.insert_data('x', t_k, self.x_mean)
        self.dataset.insert_data('P', t_k, vec(self.p_k))

        return x_hat_k, p_k
Example #10
 def setup_nlp(self, μ0, Σ0):
     # initialize variables
     self.opti = cs.Opti()
     self.U = self.opti.variable(3, self.N)
     self.opti.set_initial(self.U, 0)
     p = cs.vec(Σ0)
     x = μ0
     x_initial_guess = μ0
     self.cost = 0.0
     for tt in range(self.N):
         u_initial_guess = self.p_control(x_initial_guess)
         self.opti.set_initial(self.U[:, tt], u_initial_guess)
         x_initial_guess = self.f(x_initial_guess, u_initial_guess)
         x_new = self.f(x, self.U[:, tt])
         p = self.cov_trans(x, x_new, self.U[:, tt], p)
         x = x_new
         self.cost += self.c_stage(x, self.U[:, tt], p)
     self.cost += self.c_term(x, p)
     # set up minimization problem
     self.opti.minimize(self.cost)
     # control constraints
     self.opti.subject_to(
         self.opti.bounded(self.u_param_min[0], self.U[0, :],
                           self.u_param_max[0]))
     self.opti.subject_to(
         self.opti.bounded(self.u_param_min[1], self.U[1, :],
                           self.u_param_max[1]))
     self.opti.subject_to(
         self.opti.bounded(self.u_param_min[2], self.U[2, :],
                           self.u_param_max[2]))
Example #11
 def setup_nlp(self, μ0, Σ0):
     # initialize variables
     self.opti = cs.Opti()
     self.U = self.opti.variable(2, self.N)
     self.opti.set_initial(self.U, 0)
     p = cs.vec(Σ0)
     p_initial_guess = p
     x = μ0
     x_initial_guess = μ0
     v = np.zeros(μ0.shape[0] - 2)
     self.cost = 0.0
     for tt in range(self.N):
         u_initial_guess = self.gradient_control(x_initial_guess,
                                                 p_initial_guess)
         self.opti.set_initial(self.U[:, tt], u_initial_guess)
         x_new = self.f(x, self.U[:, tt])
         p = self.cov_trans(x, self.U[:, tt], v, p)
         p_initial_guess = self.cov_trans(x_initial_guess, u_initial_guess,
                                          v, p_initial_guess)
         x_initial_guess = self.f(x_initial_guess, u_initial_guess)
         x = x_new
         self.cost += self.c_stage(self.U[:, tt])
     self.cost += self.c_term(p)
     # set up minimization problem
     self.opti.minimize(self.cost)
     # control constraints
     self.opti.subject_to(
         self.opti.bounded(self.u_param_min[0], self.U[0, :],
                           self.u_param_max[0]))
     self.opti.subject_to(
         self.opti.bounded(self.u_param_min[1], self.U[1, :],
                           self.u_param_max[1]))
Example #12
    def exitForEquation(self, tree):
        logger.debug('exitForEquation')

        f = self.for_loops.pop()
        if len(f.values) > 0:
            indexed_symbols = list(f.indexed_symbols.keys())
            args = [f.index_variable] + indexed_symbols
            expr = ca.vcat([ca.vec(self.get_mx(e)) for e in tree.equations])
            free_vars = ca.symvar(expr)

            arg_names = [arg.name() for arg in args]
            free_vars = [e for e in free_vars if e.name() not in arg_names]
            all_args = args + free_vars
            F = ca.Function('loop_body', all_args, [expr])

            indexed_symbols_full = []
            for k in indexed_symbols:
                s = f.indexed_symbols[k]
                orig_symbol = self.nodes[self.current_class][s.tree.name]
                indexed_symbol = orig_symbol[s.indices]
                if s.transpose:
                    indexed_symbol = ca.transpose(indexed_symbol)
                indexed_symbols_full.append(indexed_symbol)

            Fmap = F.map("map", self.map_mode, len(f.values),
                         list(range(len(args), len(all_args))), [])
            res = Fmap.call([f.values] + indexed_symbols_full + free_vars)

            self.src[tree] = res[0].T
        else:
            self.src[tree] = ca.MX()
Example #13
    def test_MXeval_mx(self):
        n = array([1.2, 2.3, 7, 1.4])
        for inputshape in ["column", "row", "matrix"]:
            for outputshape in ["column", "row", "matrix"]:
                for inputtype in ["dense", "sparse"]:
                    for outputtype in ["dense", "sparse"]:
                        self.message(
                            "eval_mx on MX. Input %s %s, Output %s %s" %
                            (inputtype, inputshape, outputtype, outputshape))
                        f = Function(
                            "f", self.mxinputs[inputshape][inputtype],
                            self.mxoutputs[outputshape][outputtype](
                                self.mxinputs[inputshape][inputtype][0]))
                        f_in = DM(f.sparsity_in(0), n)
                        r = f(f_in)
                        J = self.jacobians[inputtype][outputtype](*n)

                        seeds = [[1, 0, 0, 0], [0, 2, 0, 0],
                                 [1.2, 4.8, 7.9, 4.6]]

                        y = MX.sym("y", f.sparsity_in(0))

                        fseeds = [DM(f.sparsity_in(0), x) for x in seeds]
                        aseeds = [DM(f.sparsity_out(0), x) for x in seeds]
                        res = f(y)
                        fwdsens = f.forward([y], [res], [[x] for x in fseeds])
                        adjsens = f.reverse([y], [res], [[x] for x in aseeds])
                        fwdsens = [x[0] for x in fwdsens]
                        adjsens = [x[0] for x in adjsens]

                        fe = Function('fe', [y], [res])

                        re = fe(f_in)

                        self.checkarray(r, re)

                        for sens, seed in zip(fwdsens, fseeds):
                            fe = Function("fe", [y], [sens])
                            re = fe(f_in)
                            self.checkarray(c.vec(re), mtimes(J, c.vec(seed)),
                                            "AD")

                        for sens, seed in zip(adjsens, aseeds):
                            fe = Function("fe", [y], [sens])
                            re = fe(f_in)
                            self.checkarray(c.vec(re),
                                            mtimes(J.T, c.vec(seed)), "AD")
Example #14
def casadi_struct2vec(s):
  flat = []
  if isinstance(s,OrderedDict):
    for f in s.keys():
      flat.append(casadi_struct2vec(s[f]))
    return C.vertcat(flat)
  else:
    return C.vec(s)
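
A short usage sketch (the field names are made up; depending on the CasADi version, C.vertcat(flat) in the helper may need to be written C.vcat(flat) or C.vertcat(*flat), and older releases spell C.DM as C.DMatrix):

from collections import OrderedDict
import casadi as C

s = OrderedDict()
s['x'] = C.DM([[1, 3], [2, 4]])   # 2x2 block
s['u'] = C.DM([5, 6])             # 2x1 block
v = casadi_struct2vec(s)
print(v)                          # [1, 2, 3, 4, 5, 6]: each field is vectorized column-major, then stacked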
Example #15
 def out(self, inn):
     inn = cas.vec(inn)
     assert self.number_in == inn.size()[0], 'Wrong number of inputs given.'
     if self.activation == 'softmax':
         y = self.activation_function(self.weights * inn)
     else:
         y = self.activation_function(cas.dot(self.weights, inn))
     return y
Example #16
File: helpers.py Project: eglrp/optispline
 def assertEqualTensor(self, a, b, tol=1e-9):
     try:
         a = C.vec(C.DM(a))
     except:
         try:
             a = a.data()
         except:
             a = a.ravel('F')
     try:
         b = C.vec(C.DM(b))
     except:
         try:
             b = b.data()
         except:
             b = b.ravel('F')
     self.assertTrue(C.DM(a).is_regular())
     self.assertTrue(C.DM(b).is_regular())
     self.assertTrue(float(C.norm_inf(a - b)) < tol)
Example #17
    def create_variable_polynomial_approximation(self,
                                                 size,
                                                 degree,
                                                 name='var_appr',
                                                 tau=None,
                                                 point_at_t0=False):
        if not isinstance(name, list):
            name = [name + '_' + str(i) for i in range(size)]

        if tau is None:
            tau = self.model.tau  # Collocation point

        if degree == 1:
            if size > 0:
                points = vertcat(
                    *[SX.sym(name[s], 1, degree) for s in range(size)])
            else:
                points = SX.sym('empty_sx', size, degree)
            par = vec(points)
            u_pol = points
        else:
            if point_at_t0:
                if size > 0:
                    points = vertcat(
                        *[SX.sym(name[s], 1, degree + 1) for s in range(size)])
                else:
                    points = SX.sym('empty_sx', size, degree)
                tau, ell_list = self._create_lagrangian_polynomial_basis(
                    degree, starting_index=0, tau=tau)
                u_pol = sum(
                    [ell_list[j] * points[:, j] for j in range(0, degree + 1)])
            else:
                if size > 0:
                    points = vertcat(
                        *[SX.sym(name[s], 1, degree) for s in range(size)])
                else:
                    points = SX.sym('empty_sx', size, degree)
                tau, ell_list = self._create_lagrangian_polynomial_basis(
                    degree, starting_index=1, tau=tau)
                u_pol = sum(
                    [ell_list[j] * points[:, j] for j in range(0, degree)])
            par = vec(points)

        return u_pol, par
Example #18
    def buildOpt(self, MU, P, S, dt, tgt, H):
        self.model = casadi.Opti()

        self.sVar = self.model.variable(P.shape[0], 1)
        self.pVar = self.model.variable(P.shape[0], P.shape[1])
        self.stateVar = self.model.variable(P.shape[0], H)
        self.tVar = self.model.variable(P.shape[0], H)
        self.initX = self.model.parameter(P.shape[0], 1)
        # self.E_abs = self.model.variable(1,H)

        #self.model.subject_to(self.model.bounded(0.0,self.sVar,3000))
        self.model.subject_to(self.sVar >= 0)
        self.model.subject_to(self.model.bounded(0, casadi.vec(self.pVar), 1))

        for i in range(P.shape[0]):
            if (S[i, 0] >= 0):
                self.model.subject_to(self.sVar[i] == S[i, 0])
            for j in range(P.shape[1]):
                if (P[i, j] >= 0):
                    self.model.subject_to(self.pVar[i, j] == P[i, j])
            #self.model.subject_to((self.pVar[i,0]+self.pVar[i,1]+self.pVar[i,2])==1.0)

        self.model.subject_to(self.stateVar[:, 0] == self.initX)

        for i in range(P.shape[0]):
            for h in range(H):
                self.model.subject_to(self.sVar[1, 0] <= self.stateVar[1, h])
                if (i == 0):
                    self.model.subject_to(self.tVar[i, h] == MU[i])
                else:
                    #self.model.subject_to(self.tVar[i,h]==MU[i]*casadi.fmin(self.sVar[i,0],self.stateVar[i,h]))
                    self.model.subject_to(self.tVar[i, h] == MU[i] *
                                          self.sVar[i, 0])

        for i in range(P.shape[0]):
            for h in range(H - 1):
                self.model.subject_to(
                    self.stateVar[i, h + 1] ==
                    (-self.tVar[i, h] + self.pVar[:, i].T @ self.tVar[:, h]) *
                    dt + self.stateVar[i, h])
                # self.model.subject_to(self.stateVar[i,h+1]==(-MU[i]*casadi.fmin(self.sVar[i],self.stateVar[i,h])
                #                                               +self.pVar[:,i].T@(MU*casadi.fmin(self.sVar,self.stateVar[:,h])))*dt+self.stateVar[i,h])

        # for h in range(H):
        #     self.model.subject_to(self.E_abs[0,h]>=(self.stateVar[1,h]-tgt*(1.0/MU[1])*self.tVar[1,h]))
        #     self.model.subject_to(self.E_abs[0,h]>=-(self.stateVar[1,h]-tgt*(1.0/MU[1])*self.tVar[1,h]))

        #self.model.minimize(casadi.sumsqr(self.stateVar[1,:]-tgt))
        # obj=0
        # for h in range(H):
        #     obj+=(self.stateVar[1,h]-tgt*MU[1]*self.tVar[1,h])**2

        optionsIPOPT = {'print_time': False, 'ipopt': {'print_level': 0}}
        optionsOSQP = {'print_time': False, 'osqp': {'verbose': False}}
        self.model.solver('ipopt', optionsIPOPT)
Example #19
    def test_ivp1(self):
        """Test solving IVP1 with collocation
        """
        x = cs.MX.sym('x')
        xdot = x

        N = 10
        tf = 1
        pdq = cl.Pdq(t=[0, tf], poly_order=N)

        X = cs.MX.sym('X', 1, N + 1)
        f = cs.Function('f', [x], [xdot])
        F = f.map(N + 1, 'serial')

        x0 = cs.MX.sym('x0')
        eq = cs.Function('eq', [cs.vec(X)], [cs.vec(F(X) - pdq.derivative(X))])
        rf = cs.rootfinder('rf', 'newton', eq)

        sol = cs.reshape(rf(cs.DM.zeros(X.shape)), X.shape)
        nptest.assert_allclose(sol[:, -1], 1 * np.exp(1 * tf))
Example #20
    def _parametrize_nu(self):
        nu_pol, nu_par = self.create_variable_polynomial_approximation(
            self.n_relax, self.degree, 'nu')

        self.nu_pol = vertcat(self.nu_pol, nu_pol)
        self.nu_par = vertcat(self.nu_par, nu_par)

        self.problem.replace_variable(self.nu_sym, nu_pol)
        self.problem.model.include_theta(vec(nu_par))

        return nu_pol, nu_par
Example #21
    def create_optimization_problem(self):
        if not self.prepared:
            self.prepare()
            self.prepared = True

        has_parameters = (self.model.n_p + self.model.n_theta > 0
                          or self.initial_condition_as_parameter
                          or self.problem.last_u is not None)

        # parameters MX
        p_mx = MX.sym('mx_p', self.model.n_p)

        # theta MX
        theta_mx = MX.sym('mx_theta_', self.model.n_theta,
                          self.finite_elements)
        theta = dict([(i, vec(theta_mx[:, i]))
                      for i in range(self.finite_elements)])

        # initial cond MX
        p_mx_x_0 = MX.sym('mx_x_0_p', self.model.n_x)

        # last control MX
        if self.last_control_as_parameter:
            p_last_u = MX.sym('mx_last_u', self.model.n_u)
        else:
            p_last_u = []

        all_mx = vertcat(p_mx, vec(theta_mx), p_mx_x_0, p_last_u)

        args = {'p': p_mx, 'x_0': p_mx_x_0, 'theta': theta, 'last_u': p_last_u}

        # Discretize the problem
        opt_problem = self.discretizer.discretize(**args)

        if has_parameters:
            opt_problem.include_parameter(all_mx)

        opt_problem.solver_options = self.nlpsol_opts

        self.opt_problem = opt_problem
Example #22
File: ad.py Project: BrechtBa/casadi
  def test_MXevalMX(self):
    n=array([1.2,2.3,7,1.4])
    for inputshape in ["column","row","matrix"]:
      for outputshape in ["column","row","matrix"]:
        for inputtype in ["dense","sparse"]:
          for outputtype in ["dense","sparse"]:
            self.message("evalMX on MX. Input %s %s, Output %s %s" % (inputtype,inputshape,outputtype,outputshape) )
            f=MXFunction("f", self.mxinputs[inputshape][inputtype],self.mxoutputs[outputshape][outputtype](self.mxinputs[inputshape][inputtype][0]))
            f_in = DMatrix(f.inputSparsity(),n)
            [r] = f([f_in])
            J = self.jacobians[inputtype][outputtype](*n)
            
            seeds = [[1,0,0,0],[0,2,0,0],[1.2,4.8,7.9,4.6]]
            
            y = MX.sym("y",f.inputSparsity())
            
            fseeds = map(lambda x: DMatrix(f.inputSparsity(),x), seeds)
            aseeds = map(lambda x: DMatrix(f.outputSparsity(),x), seeds)
            with internalAPI():
              res = f.call([y])
              fwdsens = f.callForward([y],res,map(lambda x: [x],fseeds))
              adjsens = f.callReverse([y],res,map(lambda x: [x],aseeds))
            fwdsens = map(lambda x: x[0],fwdsens)
            adjsens = map(lambda x: x[0],adjsens)
            
            fe = MXFunction('fe', [y],res)
            
            [re] = fe([f_in])
            
            self.checkarray(r,re)
            
            for sens,seed in zip(fwdsens,fseeds):
              fe = MXFunction("fe", [y],[sens])
              [re] = fe([f_in])
              self.checkarray(c.vec(re),mul(J,c.vec(seed)),"AD") 

            for sens,seed in zip(adjsens,aseeds):
              fe = MXFunction("fe", [y],[sens])
              [re] = fe([f_in])
              self.checkarray(c.vec(re),mul(J.T,c.vec(seed)),"AD") 
Example #23
File: ad.py Project: RobotXiaoFeng/casadi
  def test_MXeval_mx(self):
    n=array([1.2,2.3,7,1.4])
    for inputshape in ["column","row","matrix"]:
      for outputshape in ["column","row","matrix"]:
        for inputtype in ["dense","sparse"]:
          for outputtype in ["dense","sparse"]:
            self.message("eval_mx on MX. Input %s %s, Output %s %s" % (inputtype,inputshape,outputtype,outputshape) )
            f=Function("f", self.mxinputs[inputshape][inputtype],self.mxoutputs[outputshape][outputtype](self.mxinputs[inputshape][inputtype][0]))
            f_in = DM(f.sparsity_in(0),n)
            r = f(f_in)
            J = self.jacobians[inputtype][outputtype](*n)
            
            seeds = [[1,0,0,0],[0,2,0,0],[1.2,4.8,7.9,4.6]]
            
            y = MX.sym("y",f.sparsity_in(0))
            
            fseeds = map(lambda x: DM(f.sparsity_in(0),x), seeds)
            aseeds = map(lambda x: DM(f.sparsity_out(0),x), seeds)
            res = f(y)
            fwdsens = f.forward([y],[res],map(lambda x: [x],fseeds))
            adjsens = f.reverse([y],[res],map(lambda x: [x],aseeds))
            fwdsens = map(lambda x: x[0],fwdsens)
            adjsens = map(lambda x: x[0],adjsens)
            
            fe = Function('fe', [y], [res])
            
            re = fe(f_in)
            
            self.checkarray(r,re)
            
            for sens,seed in zip(fwdsens,fseeds):
              fe = Function("fe", [y],[sens])
              re = fe(f_in)
              self.checkarray(c.vec(re),mtimes(J,c.vec(seed)),"AD") 

            for sens,seed in zip(adjsens,aseeds):
              fe = Function("fe", [y],[sens])
              re = fe(f_in)
              self.checkarray(c.vec(re),mtimes(J.T,c.vec(seed)),"AD") 
Example #24
 def get_cov_trans_func(f, h, x, x_new, u, p, Q, R):
     P = cs.reshape(p, 11, 11)
     A = cs.jacobian(f(x, u), x)
     H = cs.jacobian(h(x_new, u), x_new)
     P_pred = A @ P @ (A.T) + Q
     S = H @ P_pred @ (H.T) + R
     K = cs.mrdivide(P_pred @ (H.T), S)
     P_updated = (cs.MX.eye(11) - K @ H) @ P_pred
     P_updated = (P_updated + P_updated.T) / 2
     p_updated = cs.vec(P_updated)
     cov_trans = cs.Function('cov_trans', [x, x_new, u, p], [p_updated],
                             ['x', 'x_new', 'u', 'p'], ['p_updated'])
     return cov_trans
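
A construction sketch for the factory above; the dynamics f, measurement h, and noise covariances below are hypothetical stand-ins (cs is casadi, as in the snippet):

import casadi as cs

x = cs.MX.sym('x', 11)
x_new = cs.MX.sym('x_new', 11)
u = cs.MX.sym('u', 3)
p = cs.MX.sym('p', 11 * 11)
f = lambda x, u: x + 0.1 * cs.vertcat(u, cs.MX.zeros(8))   # hypothetical dynamics
h = lambda x, u: x[:3]                                      # hypothetical measurement model
Q = 1e-3 * cs.DM.eye(11)                                    # process noise covariance (assumed)
R = 1e-2 * cs.DM.eye(3)                                     # measurement noise covariance (assumed)

cov_trans = get_cov_trans_func(f, h, x, x_new, u, p, Q, R)
p_next = cov_trans(x=cs.DM.zeros(11), x_new=cs.DM.zeros(11),
                   u=cs.DM.zeros(3), p=cs.vec(cs.DM.eye(11)))['p_updated']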
Example #25
def expm(a_matrix):
    """Since casadi does not have native support for matrix exponential, this is a trick to computing it.
    It can be quite expensive, specially for large matrices.
    THIS ONLY SUPPORT NUMERIC MATRICES AND MX VARIABLES, DOES NOT SUPPORT SX SYMBOLIC VARIABLES.

    :param DM a_matrix: matrix
    :return:
    """
    dim = a_matrix.shape[1]

    # Create the integrator
    x_mx = MX.sym('x', a_matrix.shape[1])
    a_mx = MX.sym('x', a_matrix.shape)
    ode = mtimes(a_mx, x_mx)
    dae_system_dict = {'x': x_mx, 'ode': ode, 'p': vec(a_mx)}

    integrator_ = integrator("integrator", "cvodes", dae_system_dict,
                             {'tf': 1})
    integrator_map = integrator_.map(a_matrix.shape[1], 'thread')

    res = integrator_map(x0=DM.eye(dim),
                         p=repmat(vec(a_matrix), (1, a_matrix.shape[1])))['xf']

    return res
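
A quick numeric sanity check (assuming the snippet's casadi imports are in scope; for this skew-symmetric A the exact exponential is a rotation matrix with cos(1) and sin(1) entries):

from casadi import DM
import numpy as np

A = DM([[0.0, 1.0], [-1.0, 0.0]])
print(expm(A))                       # approximately [[cos(1), sin(1)], [-sin(1), cos(1)]]
print(np.cos(1.0), np.sin(1.0))      # reference values for comparison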
Example #26
 def get_cov_trans_func(f, h, obs_jacobians, x, u, v, p, Q, obs_cov):
     Nt = (x.shape[0] - 2) // 2
     P = cs.reshape(p, 2 * Nt, 2 * Nt)
     A = cs.jacobian(f(x, u), x)[2:, 2:]
     x_new = f(x, u)
     H, M = obs_jacobians(x_new, v)
     P_pred = A @ P @ (A.T) + Q
     R = obs_cov(f(x, u))
     S = H @ P_pred @ (H.T) + M @ R @ (M.T)
     K = cs.mrdivide(P_pred @ (H.T), S)
     P_updated = (cs.MX.eye(2 * Nt) - K @ H) @ P_pred
     P_updated = (P_updated + P_updated.T) / 2
     p_updated = cs.vec(P_updated)
     cov_trans = cs.Function('cov_trans', [x, u, v, p], [p_updated],
                             ['x', 'u', 'v', 'p'], ['p_updated'])
     return cov_trans
Example #27
    def solve(self):
        """Solve the optimal control problem

        solve() first checks for steady-state feasibility and defines the
        optimal control problem. After solving, the instance variable sol can
        be used to examine the solution.

        TODO: Add support for other solvers
        TODO: version bump
        """
        if len(self.prob['s']) == 0:
            self.set_grid()
        # Check feasibility
        # self.check_ss_feasibility()
        # Construct optimization problem
        self.prob['solver'] = None
        N = self.options['N']
        self.prob['vars'] = cas.ssym("b", N + 1, self.sys.order)
        V = cas.vec(self.prob['vars'])
        self._make_objective()
        self._make_constraints()

        con = cas.SXFunction([V], [self.prob['con'][0]])
        obj = cas.SXFunction([V], [self.prob['obj']])
        if self.options.get('solver') == 'Ipopt':
            solver = cas.IpoptSolver(obj, con)
        else:
            print """Other solver than Ipopt are currently not supported,
            switching to Ipopt"""
            solver = cas.IpoptSolver(obj, con)
        for option, value in self.options.iteritems():
            if solver.hasOption(option):
                solver.setOption(option, value)
        solver.init()
        # Setting constraints
        solver.setInput(cas.vertcat(self.prob['con'][1]), "lbg")
        solver.setInput(cas.vertcat(self.prob['con'][2]), "ubg")
        solver.setInput([np.inf] * self.sys.order * (N + 1), "ubx")
        solver.setInput(
            cas.vertcat((
                [0] * (N + 1),
                (self.sys.order - 1) * (N + 1) * [-np.inf])),
            "lbx")

        solver.solve()
        self.prob['solver'] = solver
        self._get_solution()
Example #28
    def assert_model_equivalent_numeric(self, A, B, tol=1e-9):
        self.assertEqual(len(A.states), len(B.states))
        self.assertEqual(len(A.der_states), len(B.der_states))
        self.assertEqual(len(A.inputs), len(B.inputs))
        self.assertEqual(len(A.outputs), len(B.outputs))
        self.assertEqual(len(A.constants), len(B.constants))
        self.assertEqual(len(A.parameters), len(B.parameters))

        if not isinstance(A, CachedModel) and not isinstance(B, CachedModel):
            self.assertEqual(len(A.equations), len(B.equations))
            self.assertEqual(len(A.initial_equations),
                             len(B.initial_equations))

        for f_name in [
                'dae_residual', 'initial_residual', 'variable_metadata'
        ]:
            this = getattr(A, f_name + '_function')
            that = getattr(B, f_name + '_function')

            np.random.seed(0)

            args_in = []
            for i in range(this.n_in()):
                sp = this.sparsity_in(i)
                r = ca.DM(sp, np.random.random(sp.nnz()))
                args_in.append(r)

            this_out = this.call(args_in)
            that_out = that.call(args_in)

            # N.B. Here we require that the order of the equations in the two models is identical.
            for i, (a, b) in enumerate(zip(this_out, that_out)):
                for j in range(a.size1()):
                    for k in range(a.size2()):
                        if a[j, k].is_regular() or b[j, k].is_regular():
                            test = float(ca.norm_2(
                                ca.vec(a[j, k] - b[j, k]))) <= tol
                            if not test:
                                print(j)
                                print(k)
                                print(a[j, k])
                                print(b[j, k])
                                print(f_name)
                            self.assertTrue(test)

        return True
Example #29
    def solve(self):
        """Solve the optimal control problem

        solve() first checks for steady-state feasibility and defines the
        optimal control problem. After solving, the instance variable sol can
        be used to examine the solution.

        TODO: Add support for other solvers
        TODO: version bump
        """
        if len(self.prob['s']) == 0:
            self.set_grid()
        # Check feasibility
        # self.check_ss_feasibility()
        # Construct optimization problem
        self.prob['solver'] = None
        N = self.options['N']
        self.prob['vars'] = cas.ssym("b", N + 1, self.sys.order)
        V = cas.vec(self.prob['vars'])
        self._make_objective()
        self._make_constraints()

        con = cas.SXFunction([V], [self.prob['con'][0]])
        obj = cas.SXFunction([V], [self.prob['obj']])
        if self.options.get('solver') == 'Ipopt':
            solver = cas.IpoptSolver(obj, con)
        else:
            print """Other solver than Ipopt are currently not supported,
            switching to Ipopt"""
            solver = cas.IpoptSolver(obj, con)
        for option, value in self.options.iteritems():
            if solver.hasOption(option):
                solver.setOption(option, value)
        solver.init()
        # Setting constraints
        solver.setInput(cas.vertcat(self.prob['con'][1]), "lbg")
        solver.setInput(cas.vertcat(self.prob['con'][2]), "ubg")
        solver.setInput([np.inf] * self.sys.order * (N + 1), "ubx")
        solver.setInput(
            cas.vertcat(
                ([0] * (N + 1), (self.sys.order - 1) * (N + 1) * [-np.inf])),
            "lbx")

        solver.solve()
        self.prob['solver'] = solver
        self._get_solution()
Example #30
def create_constant_theta(constant, dimension, finite_elements):
    """
        Create constant theta

    The created theta will be a dictionary with keys range(finite_elements); each value is a vector
    with 'dimension' rows, all equal to 'constant'.

    :param float|int|DM constant: value of each theta entry.
    :param int dimension: number of rows of the vector of each theta entry.
    :param int finite_elements: number of theta entries.
    :return: constant theta
    :rtype: dict
    """
    theta = {}
    for i in range(finite_elements):
        theta[i] = vec(constant * DM.ones(dimension, 1))
    return theta
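
For instance (a hypothetical call, assuming DM and vec come from casadi as the helper expects):

theta = create_constant_theta(constant=2.5, dimension=3, finite_elements=2)
# theta == {0: DM([2.5, 2.5, 2.5]), 1: DM([2.5, 2.5, 2.5])}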
Example #31
 def log(self, initial_state, expected_x, dU, ref, disturbance):
     self.log_initial_state = ca.horzcat(self.log_initial_state,
                                         ca.vec(initial_state))
     self.log_ref = ca.horzcat(self.log_ref, ca.vec(ref))
     self.log_disturbance = ca.horzcat(self.log_disturbance,
                                       ca.vec(disturbance))
     self.log_expected_x = ca.horzcat(self.log_expected_x,
                                      ca.vec(expected_x))
     self.log_dU = ca.horzcat(self.log_dU, ca.vec(dU))
     self.log_control_cost = ca.horzcat(
         self.log_control_cost,
         ca.vec(self.get_input_change_cost() @ self.get_input_change_cost()
                @ dU))
     self.log_state_cost = ca.horzcat(
         self.log_state_cost,
         ca.vec(
             self.get_state_cost() @ self.get_state_cost() @ initial_state))
Example #32
    def exitForStatement(self, tree):
        logger.debug('exitForStatement')

        f = self.for_loops.pop()
        if len(f.values) > 0:
            indexed_symbols = list(f.indexed_symbols.keys())
            args = [f.index_variable] + indexed_symbols
            expr = ca.vcat(
                [ca.vec(self.get_mx(e.right)) for e in tree.statements])
            free_vars = ca.symvar(expr)

            arg_names = [arg.name() for arg in args]
            free_vars = [e for e in free_vars if e.name() not in arg_names]
            all_args = args + free_vars
            F = ca.Function('loop_body', all_args, [expr])

            indexed_symbols_full = []
            for k in indexed_symbols:
                s = f.indexed_symbols[k]
                orig_symbol = self.nodes[self.current_class][s.tree.name]
                indexed_symbol = orig_symbol[s.indices]
                if s.transpose:
                    indexed_symbol = ca.transpose(indexed_symbol)
                indexed_symbols_full.append(indexed_symbol)

            Fmap = F.map("map", self.map_mode, len(f.values),
                         list(range(len(args), len(all_args))), [])
            res = Fmap.call([f.values] + indexed_symbols_full + free_vars)

            # Split into a list of statements
            variables = [
                assignment.left for statement in tree.statements
                for assignment in self.get_mx(statement)
            ]
            all_assignments = []
            for i in range(len(f.values)):
                for j, variable in enumerate(variables):
                    all_assignments.append(Assignment(variable, res[0][j,
                                                                       i].T))

            self.src[tree] = all_assignments
        else:
            self.src[tree] = []
Example #33
def create_polynomial_approximation(tau,
                                    size,
                                    degree,
                                    name='var_appr',
                                    point_at_zero=False):
    """
        Create a polynomial function.

    :param casadi.SX tau:
    :param int size: size of the approximated variable (number of rows)
    :param int degree: approximation degree
    :param list|str name: name for created parameters
    :param bool point_at_zero: if the polynomial has a collocation point at tau=0
    :return: (pol, par), returns the polynomial and a vector of parameters
    :rtype: tuple
    """
    if not isinstance(name, list):
        name = [name + '_' + str(i) for i in range(size)]

    # define the number of parameters
    if point_at_zero and degree > 1:
        n_par = degree + 1
    else:
        n_par = degree

    # if size == 0, an empty symbolic variable of shape (0, n_par) is created
    if size > 0:
        points = vertcat(*[SX.sym(name[s], 1, n_par) for s in range(size)])
    else:
        points = SX.sym('empty_sx', size, n_par)

    # if degree == 1 the points are already the approximation; otherwise create a Lagrangian polynomial basis
    if degree == 1:
        pol = points
    else:
        ell_list = _create_lagrangian_polynomial_basis(
            tau=tau, degree=degree, point_at_zero=point_at_zero)
        pol = sum([ell_list[j] * points[:, j] for j in range(n_par)])
    par = vec(points)

    return pol, par
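
A small usage sketch (assuming SX, vertcat and vec are imported from casadi and that _create_lagrangian_polynomial_basis is available in the same module):

from casadi import SX

tau = SX.sym('tau')
pol, par = create_polynomial_approximation(tau, size=2, degree=3, name='u')
print(pol.shape, par.shape)   # (2, 1) polynomial in tau and (6, 1) vector of collocation parameters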
Example #34
    def exitForEquation(self, tree):
        f = self.for_loops.pop()

        indexed_symbols = list(f.indexed_symbols.keys())
        args = [f.index_variable] + indexed_symbols
        expr = ca.vcat([ca.vec(self.get_mx(e)) for e in tree.equations])
        free_vars = ca.symvar(expr)

        arg_names = [arg.name() for arg in args]
        free_vars = [e for e in free_vars if e.name() not in arg_names]
        all_args = args + free_vars
        F = ca.Function('loop_body_' + f.name, all_args, [expr])

        indexed_symbols_full = [
            self.nodes[f.indexed_symbols[k].tree.name][
                f.indexed_symbols[k].indices - 1] for k in indexed_symbols
        ]
        Fmap = F.map("map", "serial", len(f.values),
                     list(range(len(args), len(all_args))), [])
        res = Fmap.call([f.values] + indexed_symbols_full + free_vars)

        self.src[tree] = res[0].T
Example #35
    def exitForStatement(self, tree):
        logger.debug('exitForStatement')

        f = self.for_loops.pop()
        if len(f.values) > 0:
            indexed_symbols = list(f.indexed_symbols.keys())
            args = [f.index_variable] + indexed_symbols
            expr = ca.vcat([ca.vec(self.get_mx(e.right)) for e in tree.statements])
            free_vars = ca.symvar(expr)

            arg_names = [arg.name() for arg in args]
            free_vars = [e for e in free_vars if e.name() not in arg_names]
            all_args = args + free_vars
            F = ca.Function('loop_body', all_args, [expr])

            indexed_symbols_full = []
            for k in indexed_symbols:
                s = f.indexed_symbols[k]
                orig_symbol = self.nodes[self.current_class][s.tree.name]
                indexed_symbol = orig_symbol[s.indices]
                if s.transpose:
                    indexed_symbol = ca.transpose(indexed_symbol)
                indexed_symbols_full.append(indexed_symbol)

            Fmap = F.map("map", self.map_mode, len(f.values), list(
                range(len(args), len(all_args))), [])
            res = Fmap.call([f.values] + indexed_symbols_full + free_vars)

            # Split into a list of statements
            variables = [assignment.left for statement in tree.statements for assignment in self.get_mx(statement)]
            all_assignments = []
            for i in range(len(f.values)):
                for j, variable in enumerate(variables):
                    all_assignments.append(Assignment(variable, res[0][j, i].T))

            self.src[tree] = all_assignments
        else:
            self.src[tree] = []
Example #36
 def _get_solution(self):
     solver = self.prob['solver']
     N = self.options['N']
     Nc = self.options['Nc']
     x_opt = np.array(solver.getOutput("x")).ravel()
     delta = np.diff(self.prob['s'])
     b_opt = np.reshape(x_opt[:self.sys.order * (N + 1)], (N + 1, -1), order='F')
     h_opt = np.reshape(x_opt[self.sys.order * (N + 1):], (Nc, -1), order='F')
     time = np.cumsum(np.hstack([0, 2 * delta / (np.sqrt(b_opt[:-1, 0]) +
                                              np.sqrt(b_opt[1:, 0]))]))
     # Resample to constant time-grid
     t = np.linspace(time[0], time[-1], self.options['Nt'])
     b_opt = np.array([np.interp(t, time, b) for b in b_opt.T]).T
     # Get s and derivatives from b_opt
     s = np.interp(t, time, self.prob['s'])
     b, Ds = self._make_path()[1:]
     Ds_f = cas.SXFunction([b], [Ds])  # derivatives of s wrt b
     Ds_f.init()
     s_opt = np.vstack((s, np.vstack([evalf(Ds_f, bb).toArray().ravel()
                                       for bb in b_opt]).T)).T
     self.sol['s'] = s_opt
     self.sol['h'] = h_opt
     self.sol['b'] = b_opt
     self.sol['t'] = t
     # Evaluate the states
     basisH = self._make_basis()
     B = [np.dot(basisH(s_opt[:, 0]), h_opt)]
     for i in range(1, self.h.size2()):
         # B.append(np.matrix(np.dot(basisH.derivative(s_opt[:, 0], i), h_opt)))
         Bi, p = basisH.derivative(i)
         B.append(np.matrix(np.dot(np.dot(Bi(s_opt[:, 0]), p), h_opt)))
     f = cas.SXFunction([cas.vertcat([self.s, cas.vec(self.h)])],
                        [cas.substitute(self.sys.x.values(), self.sys.y, self.path)])
     f_val = np.array([evalf(f, s.T).toArray().ravel()
                       for s in np.hstack((s_opt, np.hstack(B)))])
     self.sol['states'] = dict([(k, f_val[:, i])
                                for i, k in enumerate(self.sys.x.keys())])
Example #37
    def test_ivp(self):
        """Test solving IVP with collocation
        """
        x = cs.MX.sym('x')
        xdot = x
        dae = dae_model.SemiExplicitDae(x=x, ode=xdot)

        N = 4
        tf = 1
        scheme = cl.CollocationScheme(dae=dae,
                                      t=[0, tf],
                                      order=N,
                                      method='legendre')

        x0 = cs.MX.sym('x0')
        var = scheme.combine(['x', 'K'])

        eqf = cs.Function('eq', [cs.vec(var), x0],
                          [cs.vertcat(scheme.eq, scheme.x[:, 0] - x0)])
        rf = cs.rootfinder('rf', 'newton', eqf)

        sol = var(rf(var(0), 1))
        nptest.assert_allclose(sol['x', :, -1],
                               np.atleast_2d(1 * np.exp(1 * tf)))
Example #38
def fullCamModel(dae,conf):
    PdatC1 = conf['PdatC1']
    PdatC2 = conf['PdatC2']
    RPC1 = conf['RPC1']
    RPC2 = conf['RPC2']
    pos_marker_body1 = conf['pos_marker_body1']
    pos_marker_body2 = conf['pos_marker_body2']
    pos_marker_body3 = conf['pos_marker_body3']
    
    RpC1 = C.DMatrix.eye(4)
    RpC1[0:3,0:3] = RPC1[0:3,0:3].T
    RpC1[0:3,3] = C.mul(-RPC1[0:3,0:3].T,RPC1[0:3,3])
    RpC2 = C.DMatrix.eye(4);
    RpC2[0:3,0:3] = RPC2[0:3,0:3].T
    RpC2[0:3,3] = C.mul(-RPC2[0:3,0:3].T,RPC2[0:3,3])
    
    PC1 = C.SXMatrix(3,3)
    PC1[0,0] = PdatC1[0]
    PC1[1,1] = PdatC1[1]
    PC1[0,2] = PdatC1[2]
    PC1[1,2] = PdatC1[3]
    PC1[2,2] = 1.0
    PC2 = C.SXMatrix(3,3)
    PC2[0,0] = PdatC2[0]
    PC2[1,1] = PdatC2[1]
    PC2[0,2] = PdatC2[2]
    PC2[1,2] = PdatC2[3]
    PC2[2,2] = 1.0
    p = C.vertcat([dae['x'],dae['y'],dae['z']])
    R = C.veccat( [dae[n] for n in ['e11', 'e12', 'e13',
                                    'e21', 'e22', 'e23',
                                    'e31', 'e32', 'e33']]
                      ).reshape((3,3))
    uv_all = C.vertcat([C.vec(singleCamModel(p,R,RpC1[0:3,:],PC1,pos_marker_body1)) ,\
                        C.vec(singleCamModel(p,R,RpC1[0:3,:],PC1,pos_marker_body2)) ,\
                        C.vec(singleCamModel(p,R,RpC1[0:3,:],PC1,pos_marker_body3)) ,\
                        C.vec(singleCamModel(p,R,RpC2[0:3,:],PC2,pos_marker_body1)) ,\
                        C.vec(singleCamModel(p,R,RpC2[0:3,:],PC2,pos_marker_body2)) ,\
                        C.vec(singleCamModel(p,R,RpC2[0:3,:],PC2,pos_marker_body3))])
    return uv_all
Example #39
    def exitForEquation(self, tree):
        logger.debug('exitForEquation')

        f = self.for_loops.pop()
        if len(f.values) > 0:
            indexed_symbols = list(f.indexed_symbols.keys())
            args = [f.index_variable] + indexed_symbols
            expr = ca.vcat([ca.vec(self.get_mx(e)) for e in tree.equations])
            free_vars = ca.symvar(expr)

            arg_names = [arg.name() for arg in args]
            free_vars = [e for e in free_vars if e.name() not in arg_names]
            all_args = args + free_vars
            F = ca.Function('loop_body', all_args, [expr])

            indexed_symbols_full = []
            for k in indexed_symbols:
                s = f.indexed_symbols[k]
                indices = s.indices
                try:
                    i = self.model.delay_states.index(k.name())
                except ValueError:
                    orig_symbol = self.nodes[self.current_class][s.tree.name]
                else:
                    # We are missing a similarly shaped delayed symbol. Make a new one with the appropriate shape.
                    delay_symbol = self.model.delay_arguments[i]

                    # We need to figure out the shape of the expression that
                    # we are delaying. The symbols that can occur in the delay
                    # expression should have been encountered before this
                    # iteration of the loop. The assert statement below covers
                    # this.
                    delay_expr_args = free_vars + all_args[:len(indexed_symbols_full)+1]
                    assert set(ca.symvar(delay_symbol.expr)).issubset(delay_expr_args)

                    f_delay_expr = ca.Function('delay_expr', delay_expr_args, [delay_symbol.expr])
                    f_delay_map = f_delay_expr.map("map", self.map_mode, len(f.values), list(
                        range(len(free_vars))), [])
                    [res] = f_delay_map.call(free_vars + [f.values] + indexed_symbols_full)
                    res = res.T

                    # Make the symbol with the appropriate size, and replace the old symbol with the new one.
                    orig_symbol = _new_mx(k.name(), *res.size())
                    assert res.size1() == 1 or res.size2() == 1, "Slicing does not yet work with 2-D indices"
                    indices = slice(None, None)

                    model_input = next(x for x in self.model.inputs if x.symbol.name() == k.name())
                    model_input.symbol = orig_symbol
                    self.model.delay_arguments[i] = DelayArgument(res, delay_symbol.duration)

                indexed_symbol = orig_symbol[indices]
                if s.transpose:
                    indexed_symbol = ca.transpose(indexed_symbol)
                indexed_symbols_full.append(indexed_symbol)

            Fmap = F.map("map", self.map_mode, len(f.values), list(
                range(len(args), len(all_args))), [])
            res = Fmap.call([f.values] + indexed_symbols_full + free_vars)

            self.src[tree] = res[0].T
        else:
            self.src[tree] = ca.MX()
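The key mechanism in ``exitForEquation`` is building the loop body once as a CasADi ``Function`` and then evaluating it over all loop iterations with ``Function.map``, passing the iteration-dependent symbols as repeated inputs and the free variables only once. A small self-contained sketch of that pattern (names are illustrative, not from pymoca; the common parameter is repeated explicitly with ``repmat`` for clarity):

import casadi as ca

# Loop body: residual of the equation x[i] - i*c for a single iteration.
i = ca.MX.sym('i')
xi = ca.MX.sym('xi')
c = ca.MX.sym('c')
body = ca.Function('loop_body', [i, xi, c], [xi - i * c])

n = 4
body_map = body.map(n)                    # evaluates the body n times at once
values = ca.DM([1, 2, 3, 4]).T            # loop index values, one per column
x = ca.MX.sym('x', 1, n)                  # indexed symbol, one entry per iteration
eqs = body_map(values, x, ca.repmat(c, 1, n))
print(eqs)                                # 1 x 4 row of residual expressions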
예제 #40
0
def vec(inputobj):

    return ca.vec(inputobj)
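For context, ``casadi.vec`` stacks the columns of its argument into a single column vector (CasADi is column-major), for example:

import casadi as ca

M = ca.DM([[1, 2], [3, 4]])
print(ca.vec(M))   # [1, 3, 2, 4]: column 0 on top of column 1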
예제 #41
0
    def __init__(self,objective,*args):
        """
               optisolve(objective)
               optisolve(objective,constraints)
        """
        if len(args)>=1:
            constraints = args[0]
        else:
            constraints = []
        options = dict()
        if len(args)>=2:
            options = args[1]
        
        
        if not isinstance(constraints,list):
            raise Exception("Constraints must be given as list: [x>=0,y<=0]")
            

        [ gl_pure, gl_equality] = sort_constraints( constraints )
        symbols = OptimizationObject.get_primitives([objective]+gl_pure)

        # helper functions for 'x'
        X = C.veccat(*symbols["x"])
        helper = C.Function('helper',[X],symbols["x"])

        helper_inv = C.Function('helper_inv',symbols["x"],[X])

        # helper functions for 'p' if applicable
        if 'p' in symbols:
          P = C.veccat(*symbols["p"])

          self.Phelper_inv = C.Function('Phelper_inv',symbols["p"],[P])
          
        else:
          P = C.MX.sym('p',0,1)

        if len(gl_pure)>0:
            g_helpers = [];
            for p in gl_pure:
               g_helpers.append(C.MX.sym('g',p.sparsity())) 
            
            G_helpers = C.veccat(*g_helpers)

            self.Ghelper = C.Function('Ghelper',[G_helpers],g_helpers)

            self.Ghelper_inv = C.Function('Ghelper_inv',g_helpers,[G_helpers])
        
        codegen = False;
        if 'codegen' in options:
            codegen = options["codegen"]
            del options["codegen"]
        
        opt = {}
        if codegen:
            options["jit"] = True
            opt["jit"] = True
        
        gl_pure_v = C.MX()
        if len(gl_pure)>0:
           gl_pure_v = C.veccat(*gl_pure)

        if objective.is_vector() and objective.numel()>1:
            F = C.vec(objective)
            objective = 0.5*C.dot(F,F)
            FF = C.Function('nlp',[X,P], [F])
            JF = FF.jacobian()
            J_out = JF.call([X,P])
            J = J_out[0].T;
            H = C.mtimes(J,J.T)
            sigma = C.MX.sym('sigma')
            Hf = C.Function('H',dict(x=X,p=P,lam_f=sigma,hess_gamma_x_x=sigma*C.triu(H)),['x', 'p', 'lam_f', 'lam_g'],['hess_gamma_x_x'],opt)
            if "expand" in options and options["expand"]:
               Hf = Hf.expand()
            options["hess_lag"] = Hf
        
        nlp = {"x":X,"p":P,"f":objective,"g":gl_pure_v}

        self.solver = C.nlpsol('solver','ipopt', nlp, options)

        # Save to class properties
        self.symbols      = symbols
        self.helper       = helper
        self.helper_inv   = helper_inv
        self.gl_equality  = gl_equality
        
        self.resolve()
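The vector-objective branch above builds a Gauss-Newton Hessian: for f(x) = 0.5*||F(x)||^2 the exact Hessian is J'J plus a residual-weighted curvature term, so J'J is a convenient positive semi-definite approximation near a small-residual solution, and it is what gets handed to IPOPT through ``hess_lag``. A small sketch of that relation on a toy residual (hypothetical names, not part of optisolve):

import casadi as ca

x = ca.MX.sym('x', 2)
F = ca.vertcat(x[0] - 1, 10 * (x[1] - x[0]**2))   # toy residual vector
f = 0.5 * ca.dot(F, F)

J = ca.jacobian(F, x)
H_gn = ca.mtimes(J.T, J)          # Gauss-Newton approximation
H_exact = ca.hessian(f, x)[0]     # exact Hessian, for comparison

h = ca.Function('h', [x], [H_gn, H_exact])
print(h(ca.DM([1, 1])))           # at a zero-residual point the two coincide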
예제 #42
0
파일: pecas.py 프로젝트: adbuerger/PECas
    def run_parameter_estimation(self, hessian = "gauss-newton"):

        r'''
        :param hessian: Method of hessian calculation/approximation; possible
                        values are `gauss-newton` and `exact-hessian`
        :type hessian: str

        This function runs a least squares parameter estimation for the
        given problem and data set.
        For this, an NLP of the following
        structure is set up with a direct collocation approach and solved
        using IPOPT:

        .. math::

            \begin{aligned}
                & \text{arg}\,\underset{x, p, v, \epsilon_e, \epsilon_u}{\text{min}} & & \frac{1}{2} \| R(w, v, \epsilon_e, \epsilon_u) \|_2^2 \\
                & \text{subject to:} & & R(w, v, \epsilon_e, \epsilon_u) = w^{1/2} \begin{pmatrix} {v} \\ {\epsilon_e} \\ {\epsilon_u} \end{pmatrix} \\
                & & & w = \begin{pmatrix} {w_{v}}^T & {w_{\epsilon_{e}}}^T & {w_{\epsilon_{u}}}^T \end{pmatrix} \\
                & & & v_{l} + y_{l} - \phi(t_{l}, u_{l}, x_{l}, p) = 0 \\
                & & & (t_{k+1} - t_{k}) f(t_{k,j}, u_{k,j}, x_{k,j}, p, \epsilon_{e,k,j}, \epsilon_{u,k,j}) - \sum_{r=0}^{d} \dot{L}_r(\tau_j) x_{k,r} = 0 \\
                & & & x_{k+1,0} - \sum_{r=0}^{d} L_r(1) x_{k,r} = 0 \\
                & & & t_{k,j} = t_k + (t_{k+1} - t_{k}) \tau_j \\
                & & & L_r(\tau) = \prod_{m=0,\, m \neq r}^{d} \frac{\tau - \tau_m}{\tau_r - \tau_m}\\
                & \text{for:} & & k = 1, \dots, N, ~~~ l = 1, \dots, M, ~~~ j = 1, \dots, d, ~~~ r = 1, \dots, d \\
                & & & \tau_j = \text{time points w. r. t. scheme and order}
            \end{aligned}


        The status of IPOPT indicates whether the computation finished
        successfully. The optimal values of all optimization
        variables :math:`\hat{x}` can be accessed
        via the class variable ``LSq.Xhat``, while the estimated parameters
        :math:`\hat{p}` can also be accessed separately via the class attribute
        ``LSq.phat``.

        **Please be aware:** IPOPT finishing successfully does not necessarily
        mean that the estimation results for the unknown parameters are useful
        for your purposes; it just means that IPOPT was able to solve the given
        optimization problem. In any case, verify your results, e.g. by
        simulation using the class function :func:`run_simulation`.
        '''          

        intro.pecas_intro()
        print('\n' + 18 * '-' + \
            ' PECas least squares parameter estimation ' + 18 * '-')

        print('''
Starting least squares parameter estimation using IPOPT, 
this might take some time ...
''')

        self.tstart_estimation = time.time()

        g = ca.vertcat([ca.vec(self.pesetup.phiN) - self.yN + \
            ca.vec(self.pesetup.V)])

        self.R = ca.sqrt(self.w) * \
            ca.veccat([self.pesetup.V, self.pesetup.EPS_E, self.pesetup.EPS_U])

        if self.pesetup.g.size():

            g = ca.vertcat([g, self.pesetup.g])

        self.g = g

        self.Vars = ca.veccat([

                self.pesetup.P, \
                self.pesetup.X, \
                self.pesetup.XF, \
                self.pesetup.V, \
                self.pesetup.EPS_E, \
                self.pesetup.EPS_U, \

            ])


        nlp = ca.MXFunction("nlp", ca.nlpIn(x=self.Vars), \
            ca.nlpOut(f=(0.5 * ca.mul([self.R.T, self.R])), g=self.g))

        options = {}
        options["tol"] = 1e-10
        options["linear_solver"] = self.linear_solver

        if hessian == "gauss-newton":

            # ipdb.set_trace()

            gradF = nlp.gradient()
            jacG = nlp.jacobian("x", "g")

            # Can't the following be implemented more efficiently?!

            # gradF.derivative(0, 1)

            J = ca.jacobian(self.R, self.Vars)

            sigma = ca.MX.sym("sigma")
            hessLag = ca.MXFunction("H", \
                ca.hessLagIn(x = self.Vars, lam_f = sigma), \
                ca.hessLagOut(hess = sigma * ca.mul(J.T, J)))
        
            options["hess_lag"] = hessLag
            options["grad_f"] = gradF
            options["jac_g"] = jacG

        elif hessian == "exact-hessian":

            # let NlpSolver-class compute everything

            pass

        else:

            raise NotImplementedError( \
                "Requested method is not implemented. Availabe methods " + \
                "are 'gauss-newton' (default) and 'exact-hessian'.")

        # Initialize the solver, solve the optimization problem

        solver = ca.NlpSolver("solver", "ipopt", nlp, options)

        # Store the results of the computation

        Varsinit = ca.veccat([

                self.pesetup.Pinit, \
                self.pesetup.Xinit, \
                self.pesetup.XFinit, \
                self.pesetup.Vinit, \
                self.pesetup.EPS_Einit, \
                self.pesetup.EPS_Uinit, \

            ])  

        sol = solver(x0 = Varsinit, lbg = 0, ubg = 0)

        self.Varshat = sol["x"]

        R_squared_fcn = ca.MXFunction("R_squared_fcn", [self.Vars], 
            [ca.mul([ \
                ca.veccat([self.pesetup.V, self.pesetup.EPS_E, self.pesetup.EPS_U]).T, 
                ca.veccat([self.pesetup.V, self.pesetup.EPS_E, self.pesetup.EPS_U])])])

        [self.R_squared] = R_squared_fcn([self.Varshat])
        
        self.tend_estimation = time.time()
        self.duration_estimation = self.tend_estimation - \
            self.tstart_estimation

        print('''
Parameter estimation finished. Check IPOPT output for status information.''')
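Note that this snippet is written against the CasADi 2.x API (``MXFunction``, ``nlpIn``/``nlpOut``, ``NlpSolver``), which no longer exists in CasADi 3.x. A hedged sketch of the same Gauss-Newton ``hess_lag`` option with the current ``nlpsol`` interface, mirroring the pattern from example #41 above (variable names and the toy residual are illustrative only):

import casadi as ca

V = ca.MX.sym('V', 4)                                  # stacked decision variables
R = ca.vertcat(V[0] - 1.0, V[1] - V[0], V[2] + V[3])   # toy residual vector

J = ca.jacobian(R, V)
p = ca.MX.sym('p', 0)
sigma = ca.MX.sym('sigma')
lam_g = ca.MX.sym('lam_g', 0)
hess_lag = ca.Function('hess_lag', [V, p, sigma, lam_g],
                       [sigma * ca.triu(ca.mtimes(J.T, J))],
                       ['x', 'p', 'lam_f', 'lam_g'], ['hess_gamma_x_x'])

nlp = {'x': V, 'f': 0.5 * ca.dot(R, R)}
solver = ca.nlpsol('solver', 'ipopt', nlp, {'hess_lag': hess_lag})
sol = solver(x0=ca.DM.zeros(4))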
예제 #43
0
    V = ca.vertcat([P, EPS_U, X0])

    x_end = X0
    obj = [x_end - ydata[0,:].T]

    for k in range(int(N)):

        x_end = rk4(x0 = x_end, p = ca.vertcat([udata[k], EPS_U[k], P]))["xf"]
        obj.append(x_end - ydata_noise[k+1, :].T)

    r = ca.vertcat([ca.vertcat(obj), EPS_U])

    wv = (1.0 / sigma_y**2) * pl.ones(ydata.shape)
    weps_u = (1.0 / sigma_u**2) * pl.ones(udata.shape)

    Sigma_y_inv = ca.diag(ca.vec(wv))
    Sigma_u_inv = ca.diag(weps_u)

    Sigma = ca.blockcat(Sigma_y_inv, ca.DMatrix(pl.zeros((Sigma_y_inv.shape[0], Sigma_u_inv.shape[1]))),\
        ca.DMatrix(pl.zeros((Sigma_u_inv.shape[0], Sigma_y_inv.shape[1]))), Sigma_u_inv)

    nlp = ca.MXFunction("nlp", ca.nlpIn(x = V), ca.nlpOut(f = ca.mul([r.T, Sigma, r])))

    nlpsolver = ca.NlpSolver("nlpsolver", "ipopt", nlp)

    V0 = ca.vertcat([

            pl.ones(3), \
            pl.zeros(N), \
            ydata_noise[0,:].T
예제 #44
0
 def _make_constraints(self):
     """Parse the constraints and put them in the correct format"""
     N = self.options['N']
     Nc = self.options['Nc']
     b = self.prob['vars'][0]
     H = self.prob['vars'][1]
     # ODEs
     # ~~~~
     con = self._ode(b)
     lb = np.alen(con) * [0]
     ub = np.alen(con) * [0]
     # Convex combination
     # ~~~~~~~~~~~~~~~~~~
     con.append(cas.sumCols(H))
     lb.extend([1] * Nc)
     ub.extend([1] * Nc)
     # Sample constraints
     # ~~~~~~~~~~~~~~~~~~
     S = self.prob['s']
     path, bs = self._make_path()[0:2]
     basisH = self._make_basis()
     B = [np.matrix(basisH(S))]
     # TODO!!! ============================================================
     for i in range(1, self.h.size2()):
         # B.append(np.matrix(basisH.derivative(S, i)))
         Bi, p = basisH.derivative(i)
         B.append(np.matrix(np.dot(Bi(S), p)))
     # ====================================================================
     for f in self.constraints:
         F = cas.substitute(f[0], self.sys.y, path)
         if f[3] is None:
             F = cas.vertcat([cas.substitute(F,
                              cas.vertcat([
                                 self.s[0],
                                 bs,
                                 cas.vec(self.h)]),
                              cas.vertcat([
                                 S[j],
                                 cas.vec(b[j, :]),
                                 cas.vec(cas.vertcat([cas.mul(B[i][j, :], H) for i in range(self.h.size2())]).trans())
                              ])) for j in range(N + 1)])
             Flb = [evalf(cas.SXFunction([self.s], [cas.SXMatrix(f[1])]), s).toArray().ravel() for s in S]
             Fub = [evalf(cas.SXFunction([self.s], [cas.SXMatrix(f[2])]), s).toArray().ravel() for s in S]
             con.append(F)
             lb.extend(Flb)
             ub.extend(Fub)
         else:
             F = cas.vertcat([cas.substitute(F,
                              cas.vertcat([
                                 self.s[0],
                                 bs,
                                 cas.vec(self.h)]),
                              cas.vertcat([
                                 S[j],
                                 cas.vec(b[j, :]),
                                 cas.vec(cas.vertcat([cas.mul(B[i][j, :], H) for i in range(self.h.size2())]).trans())
                              ])) for j in f[3]])
             con.append(F)
             lb.extend([f[1]])
             ub.extend([f[2]])
     self.prob['con'] = [con, lb, ub]
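For reference, the constraint assembly above relies on ``casadi.substitute`` to evaluate the symbolic constraint expression at each sample point along the path (``sumCols``, ``SXMatrix`` and ``.trans()`` are CasADi 2.x names). A minimal example of that mechanism with the current API:

import casadi as ca

s = ca.SX.sym('s')
b = ca.SX.sym('b')
expr = b * s**2 + 1            # a constraint expression in the path variable

# Evaluate the expression at three sample points by symbolic substitution.
samples = [0.0, 0.5, 1.0]
con = ca.vertcat(*[ca.substitute(expr, s, sj) for sj in samples])
print(con)                     # [1, 0.25*b + 1, b + 1]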
예제 #46
0
def vectorize_row_major_casadi(M):
	assert type(M)==casadi.SXMatrix
	return casadi.vec(M.T)
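Since ``casadi.vec`` stacks columns, vectorizing the transpose yields the row-major ordering this helper is named after, matching NumPy's default C order. A quick check with the current API (``SXMatrix`` is the CasADi 2.x name; ``SX``/``DM`` replace it in 3.x):

import casadi as ca

M = ca.DM([[1, 2, 3], [4, 5, 6]])
print(ca.vec(M.T))                 # [1, 2, 3, 4, 5, 6] (row-major order)
print(M.full().ravel(order='C'))   # [1. 2. 3. 4. 5. 6.], NumPy C order for comparison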