def main():
    sample_num = 100
    m = np.array([0., 0.])
    cov = np.array([[1., 0.0], [0.0, 1.]])

    sampler = GibsSampler(m, cov)

    history_means = []
    history_covs = []
    history_KL_score = []

    for i in range(sample_num):
        sampler.sample()

        if i > 5:  # skip the first few samples as burn-in before estimating
            est_m, est_cov = sampler.estimate_val()
            history_means.append(est_m)
            history_covs.append(est_cov)

            KL_score = calc_KL_div(est_m, est_cov, m, cov)  # closed-form Gaussian KL; see the sketch after this example
            history_KL_score.append(KL_score)

    observation_points = np.array([
        sampler.points_x1[:sample_num - 5], sampler.points_x2[:sample_num - 5]
    ])
    objects = [history_means, history_covs, m, cov, history_KL_score]

    animation = AnimDrawer(objects, observation_points)
    animation.draw_anim()

    plt.plot(range(len(history_KL_score)), history_KL_score)
    plt.show()

    # est_m, est_cov = sampler.estimate_val()
    """
def main():
    dim = 2
    class_num = 3
    data = sampling_mixture_poisson(classes=class_num)

    classifier = GibbsMixturedPoisson(data, class_num, dim)

    iteration_num = 15

    for i in range(iteration_num):
        print("iteration num = {0}".format(i))
        history_s_sample, history_eata_sample = classifier.gibbs_sample()

    colors = ["r", "b", "g", "y"]

    for i, datum in enumerate(data):
        plt.plot(datum[0],
                 datum[1],
                 "o",
                 color=tuple(history_eata_sample[0][i]))

    plt.show()

    for i, datum in enumerate(data):
        plt.plot(datum[0],
                 datum[1],
                 "o",
                 color=tuple(history_eata_sample[-1][i]))

    plt.show()

    objects = [history_s_sample]
    animdrawer = AnimDrawer(objects, observation_points=data)

    animdrawer.draw_anim()
def main():
    sample_num = 100
    m = np.array([0., 0.])
    cov = np.array([[1., 0.75], [0.75, 1.]])

    sampler = VariantSampler(m, cov)

    history_means = []
    history_covs = []
    history_KL_score = []

    for _ in range(sample_num):
        est_m, est_cov = sampler.estimate_val()
        history_means.append(copy.deepcopy(est_m))
        history_covs.append(copy.deepcopy(est_cov))

        eig_val, eig_vec = np.linalg.eig(est_cov)

        print("eig_val = {0}".format(eig_val))
        print("eig_vec = {0}".format(eig_vec))

        KL_score = calc_KL_div(est_m, est_cov, m, cov)
        history_KL_score.append(KL_score)

    objects = [history_means, history_covs, m, cov, history_KL_score]

    animation = AnimDrawer(objects)
    animation.draw_anim()

    plt.plot(range(len(history_KL_score)), history_KL_score)
    plt.show()

    # est_m, est_cov = sampler.estimate_val()
    """
Example #4
def main():
    """
    """
    # iteration parameters
    NUM_ITERATIONS = 500
    dt = 0.01

    # make plant
    init_x = np.array([0., 0., 0.5*math.pi])
    car = TwoWheeledCar(init_x)

    # make goal
    target = np.array([5., 3., 0.])

    # controller
    controller = iLQRController()


    for iteration in range(NUM_ITERATIONS):
        print("iteration num = {} / {}".format(iteration, NUM_ITERATIONS))

        # re-initialize the controller only on the first step
        changed = (iteration == 0)

        u = controller.calc_input(car, target, changed=changed)

        car.update_state(u, dt)  # unicycle kinematics; see the sketch after this example

    # figures and animation
    history_states = np.array(car.history_xs)

    time_fig = plt.figure()

    x_fig = time_fig.add_subplot(311)
    y_fig = time_fig.add_subplot(312)
    th_fig = time_fig.add_subplot(313)

    time = len(history_states)
    x_fig.plot(np.arange(time), history_states[:, 0])
    y_fig.plot(np.arange(time), history_states[:, 1])
    th_fig.plot(np.arange(time), history_states[:, 2])

    plt.show()

    history_states = np.array(car.history_xs)

    animdrawer = AnimDrawer([history_states, target])
    animdrawer.draw_anim()
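TwoWheeledCar and iLQRController are project classes. As a rough sketch, update_state presumably integrates the standard unicycle kinematics for the [x, y, theta] state used above; the input layout u = [v, omega] and the simple Euler integration are assumptions.

import math
import numpy as np

class TwoWheeledCarSketch:
    """Hypothetical minimal version of TwoWheeledCar (state = [x, y, theta])."""
    def __init__(self, init_x):
        self.xs = np.array(init_x, dtype=float)
        self.history_xs = [self.xs.copy()]

    def update_state(self, u, dt):
        v, omega = u  # assumed input layout: forward velocity and yaw rate
        self.xs = self.xs + np.array([v * math.cos(self.xs[2]) * dt,
                                      v * math.sin(self.xs[2]) * dt,
                                      omega * dt])
        self.history_xs.append(self.xs.copy())

Under that reading, history_xs collects the initial state plus one state per step, which is what the plotting code above indexes as history_states[:, 0], [:, 1] and [:, 2].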
Example #5
def main():
    # iteration number
    iteration_num = 50

    # history
    history_means = []
    history_deviations = []
    history_points = []
    # make data set
    X_data = []
    Y_data = []

    for _ in range(iteration_num):
        # make model
        dim = 10  # dimension of the polynomial feature vector
        regressioner = BayezianRegression(dim)  # see the Bayesian regression sketch after this example

        # make data: sample x uniformly in [-1, 7.5) and observe y = sin(x)
        x_1 = (7.5 + 1.) * np.random.rand() + (- 1.)
        y = math.sin(x_1)
        # polynomial features [1, x, x^2, ..., x^(dim-1)]
        temp_xs = [1.]
        for _ in range(dim - 1):
            temp_xs.append(temp_xs[-1] * x_1)

        X_data.append(temp_xs)
        Y_data.append(y)
        # save
        history_points.append([x_1, y])
        
        # to numpy
        X_data = np.array(X_data)
        Y_data = np.array(Y_data)

        # fit the model
        regressioner.fit(X_data, Y_data)

        # prediction 
        mus = []
        devs = []
        
        for sample_x in np.arange(-1., 7.5, 0.1):
            # make X_data
            x_data = [1.]
            for _ in range(dim-1):
                x_data.append(x_data[-1] * sample_x)

            mu_opt, lam_inv_opt, deviation = regressioner.predict_distribution(np.array(x_data))

            mus.append(mu_opt)
            devs.append(deviation)
        
        mus = np.array(mus).flatten()
        devs = np.array(devs)

        # save
        history_means.append(mus)
        history_deviations.append(devs)

        # convert back to lists so new samples can be appended next iteration
        X_data = X_data.tolist()
        # print("X_data = {0}".format(X_data))
        Y_data = Y_data.tolist()

    draw_obj = [np.array(history_points), history_means, history_deviations]
    animdrawer = AnimDrawer(draw_obj)
    animdrawer.draw_anim()
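BayezianRegression is likewise project code. The loop above feeds it polynomial features of noiseless sin(x) samples and asks for a predictive mean and spread, so a minimal sketch under standard Bayesian-linear-regression assumptions (zero-mean Gaussian prior on the weights, known noise precision) could look like the following; the hyperparameters and the (mean, variance, deviation) return layout are assumptions chosen to mirror (mu_opt, lam_inv_opt, deviation).

import numpy as np

class BayesianRegressionSketch:
    """Hypothetical Bayesian linear regression: w ~ N(0, alpha^-1 I), noise precision beta."""
    def __init__(self, dim, alpha=1e-2, beta=25.0):
        self.alpha = alpha
        self.beta = beta
        self.mean = np.zeros(dim)
        self.cov = np.eye(dim) / alpha

    def fit(self, X, y):
        # Gaussian posterior over the weights given design matrix X and targets y
        precision = self.alpha * np.eye(X.shape[1]) + self.beta * X.T @ X
        self.cov = np.linalg.inv(precision)
        self.mean = self.beta * self.cov @ X.T @ y

    def predict_distribution(self, x):
        # predictive distribution for a single feature vector x
        mu = x @ self.mean
        var = 1.0 / self.beta + x @ self.cov @ x
        return mu, var, np.sqrt(var)  # assumed to correspond to (mu_opt, lam_inv_opt, deviation)

Re-creating the model each iteration and re-fitting on the growing X_data / Y_data lists, as the loop above does, is presumably what the animation visualizes: the predictive band tightening as data accumulates.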
Example #6
def main():
    """
    """
    # iteration parameters
    NUM_ITERATIONS = 500
    dt = 0.01

    # make plant
    init_x = np.array([0., 0., 0.5 * math.pi])
    car = TwoWheeledCar(init_x)

    # make goal (see the GoalMaker sketch after this example)
    goal_maker = GoalMaker(goal_type="sin")
    goal_maker.preprocess()

    # controller
    controller = iLQRController()

    for iteration in range(NUM_ITERATIONS):
        print("iteration num = {} / {}".format(iteration, NUM_ITERATIONS))

        target = goal_maker.calc_goal(car.xs)
        u = controller.calc_input(car, target)
        car.update_state(u, dt)  # update state

    # figures and animation
    history_states = np.array(car.history_xs)
    history_targets = np.array(goal_maker.history_target)

    time_fig = plt.figure()

    x_fig = time_fig.add_subplot(311)
    y_fig = time_fig.add_subplot(312)
    th_fig = time_fig.add_subplot(313)

    time = len(history_states)

    x_fig.plot(np.arange(time), history_states[:, 0], "r")
    x_fig.plot(np.arange(1, time),
               history_targets[:, 0],
               "b",
               linestyle="dashdot")
    x_fig.set_ylabel("x")

    y_fig.plot(np.arange(time), history_states[:, 1], "r")
    y_fig.plot(np.arange(1, time),
               history_targets[:, 1],
               "b",
               linestyle="dashdot")
    y_fig.set_ylabel("y")

    th_fig.plot(np.arange(time), history_states[:, 2], "r")
    th_fig.plot(np.arange(1, time),
                history_targets[:, 2],
                "b",
                linestyle="dashdot")
    th_fig.set_ylabel("th")

    plt.show()

    history_states = np.array(car.history_xs)
    animdrawer = AnimDrawer([history_states, history_targets])
    animdrawer.draw_anim()
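GoalMaker is not shown either. One plausible reading of goal_type="sin" is a precomputed sinusoidal path from which calc_goal returns a target pose a fixed look-ahead beyond the path point nearest to the car; everything below (path range, look-ahead, the [x, y, theta] target layout) is an assumption made only to illustrate that interface.

import numpy as np

class GoalMakerSketch:
    """Hypothetical sinusoidal reference generator with nearest-point look-ahead."""
    def __init__(self, goal_type="sin", lookahead=10):
        self.goal_type = goal_type
        self.lookahead = lookahead
        self.history_target = []
        self.path = None

    def preprocess(self):
        # precompute the reference path: (x, y, theta) along a sine curve
        xs = np.arange(0.0, 20.0, 0.05)
        ys = np.sin(xs)
        thetas = np.arctan2(np.gradient(ys), np.gradient(xs))
        self.path = np.stack([xs, ys, thetas], axis=1)

    def calc_goal(self, car_xs):
        # nearest path point to the car, then step a fixed number of points ahead
        dists = np.linalg.norm(self.path[:, :2] - car_xs[:2], axis=1)
        idx = min(np.argmin(dists) + self.lookahead, len(self.path) - 1)
        target = self.path[idx]
        self.history_target.append(target)
        return target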
def main():
    # parameters
    dt = 0.01
    simulation_time = 20  # in seconds
    PREDICT_STEP = 30
    iteration_num = int(simulation_time / dt)

    # make simulator with continuous matrix
    init_xs_lead = np.array([0., 0., math.pi / 6, 0.])
    lead_car = WheeledSystem(init_states=init_xs_lead)

    # make system model
    lead_car_system_model = SystemModel()

    # reference
    history_traj_ref = []
    history_angle_ref = []
    traj_ref_xs, traj_ref_ys = make_sample_traj(int(simulation_time / dt))
    traj_ref = np.array([traj_ref_xs, traj_ref_ys])

    # nearest point
    index_min, nearest_point = search_nearest_point(
        traj_ref, lead_car.xs[:2].reshape(2, 1))

    # get traj's curvature
    NUM_SKIP = 3
    MARGIN = 20
    angles, curvatures = calc_curvatures(
        traj_ref[:, index_min + MARGIN:index_min + PREDICT_STEP +
                 2 * NUM_SKIP + MARGIN], PREDICT_STEP, NUM_SKIP)

    # save traj ref
    history_traj_ref.append(traj_ref[:, index_min + MARGIN:index_min +
                                     PREDICT_STEP + 2 * NUM_SKIP + MARGIN])
    history_angle_ref.append(angles)

    # print(history_traj_ref)
    # input()

    # evaluation function weight
    Q = np.diag([1000000., 10., 1.])
    R = np.diag([0.1])

    # System model update
    V = calc_ideal_vel(traj_ref, dt)  # in practice this should be computed from the state
    lead_car_system_model.calc_predict_sytem_model(V, curvatures, PREDICT_STEP)

    # make controller with the discretized matrices
    lead_controller = IterativeMpcController(lead_car_system_model, Q, R,
                                             PREDICT_STEP,
                                             dt_input_upper=np.array([1 * dt]),
                                             dt_input_lower=np.array([-1 * dt]),
                                             input_upper=np.array([1.]),
                                             input_lower=np.array([-1.]))

    # initialize
    lead_controller.initialize_controller()

    for i in range(iteration_num):
        print("simulation time = {0}".format(i))

        ## lead
        # world traj
        lead_states = lead_car.xs

        # nearest point
        index_min, nearest_point = search_nearest_point(
            traj_ref, lead_car.xs[:2].reshape(2, 1))

        # end check
        if len(traj_ref_ys) <= index_min + PREDICT_STEP + 2 * NUM_SKIP + MARGIN:
            print("break")
            break

        # get traj's curvature
        angles, curvatures = calc_curvatures(
            traj_ref[:, index_min + MARGIN:index_min + PREDICT_STEP +
                     2 * NUM_SKIP + MARGIN], PREDICT_STEP, NUM_SKIP)

        # save
        history_traj_ref.append(traj_ref[:, index_min + MARGIN:index_min +
                                         PREDICT_STEP + 2 * NUM_SKIP + MARGIN])
        history_angle_ref.append(angles)

        # System model update
        V = calc_ideal_vel(traj_ref, dt)  # in practice this should be computed from the state
        lead_car_system_model.calc_predict_sytem_model(V, curvatures,
                                                       PREDICT_STEP)

        # transformation into the path frame (see the sketch after this example)
        # car
        relative_car_position = coordinate_transformation_in_position(
            lead_states[:2].reshape(2, 1), nearest_point)
        relative_car_position = coordinate_transformation_in_angle(
            relative_car_position, angles[0])

        relative_car_angle = lead_states[2] - angles[0]
        relative_car_state = np.hstack(
            (relative_car_position[1], relative_car_angle, lead_states[-1]))

        # traj_ref
        relative_traj = coordinate_transformation_in_position(
            traj_ref[:, index_min:index_min + PREDICT_STEP], nearest_point)
        relative_traj = coordinate_transformation_in_angle(
            relative_traj, angles[0])
        relative_ref_angle = np.array(angles) - angles[0]

        # make ref
        lead_reference = np.array(
            [[relative_traj[1, -1], relative_ref_angle[-1], 0.]
             for i in range(PREDICT_STEP)]).flatten()

        print("relative car state = {}".format(relative_car_state))
        print("nearest point = {}".format(nearest_point))
        # input()

        # update system matrix
        lead_controller.update_system_model(lead_car_system_model)

        lead_opt_u, all_opt_u = lead_controller.calc_input(
            relative_car_state, lead_reference)

        lead_opt_u = np.hstack((np.array([V]), lead_opt_u))

        all_opt_u = np.stack((np.ones(PREDICT_STEP) * V, all_opt_u.flatten()))

        print("opt_u = {}".format(lead_opt_u))
        print("all_opt_u = {}".format(all_opt_u))

        # predict
        lead_car.predict_state(all_opt_u, dt=dt)

        # update
        lead_car.update_state(lead_opt_u, dt=dt)

        # print(lead_car.history_predict_xs)
        # input()

    # figures and animation
    lead_history_states = np.array(lead_car.history_xs)
    lead_history_predict_states = lead_car.history_predict_xs
    """
    time_history_fig = plt.figure()
    x_fig = time_history_fig.add_subplot(311)
    y_fig = time_history_fig.add_subplot(312)
    theta_fig = time_history_fig.add_subplot(313)

    car_traj_fig = plt.figure()
    traj_fig = car_traj_fig.add_subplot(111)
    traj_fig.set_aspect('equal')

    x_fig.plot(np.arange(0, simulation_time+0.01, dt), lead_history_states[:, 0], label="lead")
    x_fig.plot(np.arange(0, simulation_time+0.01, dt), follow_history_states[:, 0], label="follow")
    x_fig.set_xlabel("time [s]")
    x_fig.set_ylabel("x")
    x_fig.legend()

    y_fig.plot(np.arange(0, simulation_time+0.01, dt), lead_history_states[:, 1], label="lead")
    y_fig.plot(np.arange(0, simulation_time+0.01, dt), follow_history_states[:, 1], label="follow")
    y_fig.plot(np.arange(0, simulation_time+0.01, dt), [4. for _ in range(iteration_num+1)], linestyle="dashed")
    y_fig.set_xlabel("time [s]")
    y_fig.set_ylabel("y")
    y_fig.legend()

    theta_fig.plot(np.arange(0, simulation_time+0.01, dt), lead_history_states[:, 2], label="lead")
    theta_fig.plot(np.arange(0, simulation_time+0.01, dt), follow_history_states[:, 2], label="follow")
    theta_fig.plot(np.arange(0, simulation_time+0.01, dt), [0. for _ in range(iteration_num+1)], linestyle="dashed")
    theta_fig.set_xlabel("time [s]")
    theta_fig.set_ylabel("theta")
    theta_fig.legend()

    time_history_fig.tight_layout()

    traj_fig.plot(lead_history_states[:, 0], lead_history_states[:, 1], label="lead")
    traj_fig.plot(follow_history_states[:, 0], follow_history_states[:, 1], label="follow")
    traj_fig.set_xlabel("x")
    traj_fig.set_ylabel("y")
    traj_fig.legend()
    plt.show()

    lead_history_us = np.array(lead_controller.history_us)
    follow_history_us = np.array(follow_controller.history_us)
    input_history_fig = plt.figure()
    u_1_fig = input_history_fig.add_subplot(111)

    u_1_fig.plot(np.arange(0, simulation_time+0.01, dt), lead_history_us[:, 0], label="lead")
    u_1_fig.plot(np.arange(0, simulation_time+0.01, dt), follow_history_us[:, 0], label="follow")
    u_1_fig.set_xlabel("time [s]")
    u_1_fig.set_ylabel("u_omega")
    
    input_history_fig.tight_layout()
    plt.show()
    """

    animdrawer = AnimDrawer([
        lead_history_states, lead_history_predict_states, traj_ref,
        history_traj_ref, history_angle_ref
    ])
    animdrawer.draw_anim()
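coordinate_transformation_in_position and coordinate_transformation_in_angle are imported from the project. Judging from the call sites above, they move 2 x N points into the path frame: translate so the nearest reference point becomes the origin, then rotate by minus the local path angle. A minimal sketch of that assumed behavior:

import numpy as np

def coordinate_transformation_in_position(positions, base_point):
    """Shift 2 x N positions so that base_point becomes the origin (assumed behavior)."""
    return positions - np.reshape(base_point, (2, 1))

def coordinate_transformation_in_angle(positions, base_angle):
    """Rotate 2 x N positions by -base_angle, into the frame aligned with the path (assumed)."""
    rot = np.array([[np.cos(base_angle), np.sin(base_angle)],
                    [-np.sin(base_angle), np.cos(base_angle)]])
    return rot @ positions

Under that reading, relative_car_position[1] above is the car's lateral error with respect to the reference trajectory, which is the quantity the iterative MPC is asked to regulate.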
Example #8
def main():
    dt = 0.05
    simulation_time = 10  # in seconds
    iteration_num = int(simulation_time / dt)

    # be careful with these matrices:
    # A and B below describe the continuous-time system; if you already have
    # discrete-time system matrices, skip the c2d step below
    # linearized car system
    V = 5.0
    A = np.array([[0., V], [0., 0.]])
    B = np.array([[0.], [1.]])

    C = np.eye(2)
    D = np.zeros((2, 1))

    # make simulator with continuous matrix
    init_xs_lead = np.array([5., 0., 0.])
    init_xs_follow = np.array([0., 0., 0.])
    lead_car = TwoWheeledSystem(init_states=init_xs_lead)
    follow_car = TwoWheeledSystem(init_states=init_xs_follow)

    # create the continuous-time system
    sysc = matlab.ss(A, B, C, D)
    # discretize it (zero-order hold); see the scipy sketch at the end of this example
    sysd = matlab.c2d(sysc, dt)

    Ad = sysd.A
    Bd = sysd.B

    # evaluation function weight
    Q = np.diag([1., 1.])
    R = np.diag([5.])
    pre_step = 15

    # make controller with the discretized matrices
    # check the solver: to use scipy instead of cvxopt, use MpcController_scipy
    lead_controller = MpcController_cvxopt(Ad,
                                           Bd,
                                           Q,
                                           R,
                                           pre_step,
                                           dt_input_upper=np.array([30 * dt]),
                                           dt_input_lower=np.array([-30 * dt]),
                                           input_upper=np.array([30.]),
                                           input_lower=np.array([-30.]))

    follow_controller = MpcController_cvxopt(
        Ad,
        Bd,
        Q,
        R,
        pre_step,
        dt_input_upper=np.array([30 * dt]),
        dt_input_lower=np.array([-30 * dt]),
        input_upper=np.array([30.]),
        input_lower=np.array([-30.]))

    lead_controller.initialize_controller()
    follow_controller.initialize_controller()

    # reference
    lead_reference = np.array([[0., 0.] for _ in range(pre_step)]).flatten()

    for i in range(iteration_num):
        print("simulation time = {0}".format(i))
        # shift the lead car's reference after one third of the simulation
        if i > int(iteration_num / 3):
            lead_reference = np.array([[4., 0.]
                                       for _ in range(pre_step)]).flatten()

        lead_states = lead_car.xs
        lead_opt_u = lead_controller.calc_input(lead_states[1:],
                                                lead_reference)
        lead_opt_u = np.hstack((np.array([V]), lead_opt_u))

        # make follow car
        follow_reference = np.array([lead_states[1:]
                                     for _ in range(pre_step)]).flatten()
        follow_states = follow_car.xs

        follow_opt_u = follow_controller.calc_input(follow_states[1:],
                                                    follow_reference)
        follow_opt_u = np.hstack((np.array([V]), follow_opt_u))

        lead_car.update_state(lead_opt_u, dt=dt)
        follow_car.update_state(follow_opt_u, dt=dt)

    # figures and animation
    lead_history_states = np.array(lead_car.history_xs)
    follow_history_states = np.array(follow_car.history_xs)

    time_history_fig = plt.figure()
    x_fig = time_history_fig.add_subplot(311)
    y_fig = time_history_fig.add_subplot(312)
    theta_fig = time_history_fig.add_subplot(313)

    car_traj_fig = plt.figure()
    traj_fig = car_traj_fig.add_subplot(111)
    traj_fig.set_aspect('equal')

    x_fig.plot(np.arange(0, simulation_time + 0.01, dt),
               lead_history_states[:, 0],
               label="lead")
    x_fig.plot(np.arange(0, simulation_time + 0.01, dt),
               follow_history_states[:, 0],
               label="follow")
    x_fig.set_xlabel("time [s]")
    x_fig.set_ylabel("x")
    x_fig.legend()

    y_fig.plot(np.arange(0, simulation_time + 0.01, dt),
               lead_history_states[:, 1],
               label="lead")
    y_fig.plot(np.arange(0, simulation_time + 0.01, dt),
               follow_history_states[:, 1],
               label="follow")
    y_fig.plot(np.arange(0, simulation_time + 0.01, dt),
               [4. for _ in range(iteration_num + 1)],
               linestyle="dashed")
    y_fig.set_xlabel("time [s]")
    y_fig.set_ylabel("y")
    y_fig.legend()

    theta_fig.plot(np.arange(0, simulation_time + 0.01, dt),
                   lead_history_states[:, 2],
                   label="lead")
    theta_fig.plot(np.arange(0, simulation_time + 0.01, dt),
                   follow_history_states[:, 2],
                   label="follow")
    theta_fig.plot(np.arange(0, simulation_time + 0.01, dt),
                   [0. for _ in range(iteration_num + 1)],
                   linestyle="dashed")
    theta_fig.set_xlabel("time [s]")
    theta_fig.set_ylabel("theta")
    theta_fig.legend()

    time_history_fig.tight_layout()

    traj_fig.plot(lead_history_states[:, 0],
                  lead_history_states[:, 1],
                  label="lead")
    traj_fig.plot(follow_history_states[:, 0],
                  follow_history_states[:, 1],
                  label="follow")
    traj_fig.set_xlabel("x")
    traj_fig.set_ylabel("y")
    traj_fig.legend()
    plt.show()

    lead_history_us = np.array(lead_controller.history_us)
    follow_history_us = np.array(follow_controller.history_us)
    input_history_fig = plt.figure()
    u_1_fig = input_history_fig.add_subplot(111)

    u_1_fig.plot(np.arange(0, simulation_time + 0.01, dt),
                 lead_history_us[:, 0],
                 label="lead")
    u_1_fig.plot(np.arange(0, simulation_time + 0.01, dt),
                 follow_history_us[:, 0],
                 label="follow")
    u_1_fig.set_xlabel("time [s]")
    u_1_fig.set_ylabel("u_omega")

    input_history_fig.tight_layout()
    plt.show()

    animdrawer = AnimDrawer([lead_history_states, follow_history_states])
    animdrawer.draw_anim()
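matlab.ss and matlab.c2d above come from the python-control MATLAB-compatibility layer, and c2d discretizes with zero-order hold by default. For reference, the same Ad and Bd can be obtained with scipy; this standalone sketch simply reuses the continuous matrices from the example above.

import numpy as np
from scipy.signal import cont2discrete

# continuous linearized car model from the example above
V = 5.0
A = np.array([[0., V], [0., 0.]])
B = np.array([[0.], [1.]])
C = np.eye(2)
D = np.zeros((2, 1))
dt = 0.05

# zero-order-hold discretization, the counterpart of matlab.c2d(sysc, dt)
Ad, Bd, Cd, Dd, _ = cont2discrete((A, B, C, D), dt, method="zoh")
print(Ad)  # close to I + A * dt for a small time step
print(Bd)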