Example #1
def optimize_path(start_pos, target_pos_in, name):
    # target_pos is module-level state, presumably read by cost_func below
    global target_pos
    target_pos = target_pos_in

    # build the dynamics model and set the initial drivetrain state
    dynamics_engine = MyRobotDynamics.cached_init("simulation")
    dynamics_engine.dt.set_value(0.1)  # 0.1 s timestep
    dynamics_engine.loads["drivetrain"].position.set_value(start_pos)
    dynamics_engine.prediction_func()
    init_state = dynamics_engine.mean_state.get_value()

    # set up the optimization problem
    T = 100  # horizon (number of time steps)
    u0 = .1 * np.random.randn(T, 2)  # random initial controls
    # u0 = np.zeros((T, 2))  # alternative: zero initial controls
    options = {}

    # run the optimization with both control inputs limited to [-1, 1]
    options["lims"] = np.array([[-1, 1],
                                [-1, 1]])

    start_time = time.time()
    x, u, L, Vx, Vxx, cost = ilqg.ilqg(dynamics_engine, cost_func, init_state, u0, options)
    # save the optimized trajectory, controls, and value-function data
    data = {"x": x, "u": u, "Vx": Vx, "Vxx": Vxx, "cost": cost}
    with open("{}.tdc".format(name), 'wb') as f:
        pickle.dump(data, f, -1)
    print(x[-1])  # final state reached by the optimized trajectory
    print("ilqg took {} seconds".format(time.time() - start_time))
Example #2
    def robotInit(self):
        # set up the optimization problem
        T = 100  # horizon (number of time steps)
        x0 = array([0, 0, 0])  # initial state
        u0 = .1 * random.randn(T, 2)  # random initial controls
        # u0 = zeros((T, 2))  # alternative: zero initial controls
        options = {}

        # run the optimization with both control inputs limited to [-1, 1]
        options["lims"] = array([[-1, 1],
                                 [-1, 1]])

        start_time = time.time()
        self.x, self.u, L, Vx, Vxx, cost = ilqg.ilqg(
            lambda x, u: dynamics_func(x, u), cost_func, x0, u0, options)
        self.i = 0  # step index into the planned trajectory
        print(self.x[-1])  # final state reached by the optimized trajectory
        print("ilqg took {} seconds".format(time.time() - start_time))
        cost_graph(self.x)  # plot the cost along the optimized trajectory

        # hardware setup: drive motors on channels 0 and 1, joystick on port 0
        self.drive = wpilib.RobotDrive(0, 1)
        self.joystick = wpilib.Joystick(0)
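
dynamics_func and cost_func are again module-level helpers that are not shown in this snippet. Since the state here is 3-dimensional and the controls are 2-dimensional, dynamics_func is plausibly a simple drivetrain model; a hypothetical sketch, assuming a state [px, py, heading], controls [v, omega], and a fixed Euler step, might be:

import numpy as np

DT = 0.1  # assumed integration timestep, not taken from this example

def dynamics_func(x, u):
    # Hypothetical sketch only: differential-drive (unicycle) kinematics
    # integrated with a single Euler step; the real dynamics_func is
    # defined elsewhere in the robot code.
    px, py, theta = x
    v, omega = u
    return np.array([px + DT * v * np.cos(theta),
                     py + DT * v * np.sin(theta),
                     theta + DT * omega])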