Code example #1
def test_cbc2_quadratic_terms():
    m = 2
    n = 3
    x_g = torch.rand(n)
    u0 = torch.rand(m)
    x = torch.rand(n)
    dt = 0.01
    cbf_gammas = [5., 5.]
    Kp = [0.9, 1.5, 0.]
    numSteps = 200
    clf = CLFCartesian(Kp = torch.tensor(Kp))
    ctrller = ControllerCLFBayesian(
            PiecewiseLinearPlanner(x, x_g, numSteps, dt),
            coordinate_converter = lambda x, x_g: x,
            dynamics = LearnedShiftInvariantDynamics(dt = dt,
                                                     mean_dynamics = AckermannDrive()),
            clf = clf,
            cbfs = obstacles_at_mid_from_start_and_goal(x, x_g),
            cbf_gammas = torch.tensor(cbf_gammas)
    )
    state = x
    state_goal = x_g
    t = 20
    (bfe, e), (V, bfv, v), mean, var = cbc2_quadratic_terms(
        lambda u: ctrller._clc(state, state_goal, u, t) * -1.0,
        state, torch.rand(m))
    assert to_numpy(bfe @ u0 + e) == pytest.approx(to_numpy(ctrller._clc(x, x_g, u0, t)), abs=1e-4, rel=1e-2)
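The decomposition checked above assumes the mean of the constraint is affine in the control u. As a self-contained illustration of that idea only (a made-up affine map, not the project's cbc2_quadratic_terms), the coefficients bfe and e of any affine f(u) = bfe @ u + e can be recovered by probing f at zero and at the unit vectors:

import torch

def affine_terms(f, m):
    # recover (bfe, e) of an affine map f(u) = bfe @ u + e by probing it
    e = f(torch.zeros(m))
    bfe = torch.stack([f(torch.eye(m)[i]) - e for i in range(m)])
    return bfe, e

f = lambda u: torch.tensor([2.0, -1.0]) @ u + 0.5  # made-up affine map
bfe, e = affine_terms(f, 2)
u0 = torch.rand(2)
assert torch.isclose(bfe @ u0 + e, f(u0))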
Code example #2
def test_affine_gp(dynamic_models, skip_test=False):
    learned_model, true_model, xtest, _ = dynamic_models
    true_cbf2 = RadialCBFRelDegree2(true_model)
    learned_cbf2 = RadialCBFRelDegree2(learned_model)
    f_gp = learned_model.f_func_gp()
    l1h_v2 = DeterministicGP(learned_cbf2.grad_cbf, xtest.shape,
                             name="∇ h(x)").t() @ f_gp
    if not skip_test:
        assert to_numpy(l1h_v2.mean(xtest)) == pytest.approx(to_numpy(
            true_cbf2.lie_f_cbf(xtest)),
                                                             rel=0.1)

    return l1h_v2
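The quantity the GP mean is checked against is the first Lie derivative L_f h(x) = ∇h(x)ᵀ f(x). A minimal deterministic sketch of that quantity with a made-up radial barrier and drift (hypothetical h and f, not RadialCBFRelDegree2 or the learned model):

import torch

center, radius = torch.tensor([1.0, 0.0]), 0.5

def h(x):  # hypothetical radial barrier, positive outside the obstacle
    return 0.5 * ((x - center).pow(2).sum() - radius**2)

def f(x):  # hypothetical drift term
    return torch.stack([-x[1], x[0]])

x = torch.rand(2, requires_grad=True)
grad_h = torch.autograd.grad(h(x), x)[0]
lie_f_h = grad_h @ f(x)  # deterministic L_f h(x) = ∇h(x)ᵀ f(x)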
Code example #3
def test_quadratic_form(dynamic_models, skip_test=False, dist=1e-4):
    learned_model, true_model, xtest, utest = dynamic_models
    true_cbf2 = RadialCBFRelDegree2(true_model)
    grad_l1h, l1h = test_gradient_gp(dynamic_models, skip_test=True)

    # covar_fu_f = partial(learned_model.covar_fu_f, utest)
    # covar_Lie1_fu = partial(l1h.covar, learned_model.fu_func_gp(utest))
    covar_grad_l1h_fu = partial(grad_l1h.covar,
                                learned_model.fu_func_gp(utest))
    fu_gp = learned_model.fu_func_gp(utest)
    l2h = grad_l1h.t() @ fu_gp
    if not skip_test:
        assert to_numpy(l2h.mean(xtest)) == pytest.approx(to_numpy(
            true_cbf2.lie2_f_h_col(xtest) +
            true_cbf2.lie_g_lie_f_h_col(xtest) * utest)[0],
                                                          abs=0.1,
                                                          rel=0.4)
    l2h.knl(xtest, xtest)

    l2h_v2 = grad_l1h.t() @ fu_gp
    if not skip_test:
        assert to_numpy(l2h_v2.mean(xtest)) == pytest.approx(to_numpy(
            true_cbf2.lie2_f_h_col(xtest) +
            true_cbf2.lie_g_lie_f_h_col(xtest) * utest)[0],
                                                             abs=0.1,
                                                             rel=0.4)
        assert to_numpy(l2h.knl(xtest, xtest)) == pytest.approx(
            to_numpy(l2h_v2.knl(xtest, xtest)))
        assert to_numpy(l2h.covar(fu_gp, xtest, xtest)) == pytest.approx(
            to_numpy(l2h_v2.covar(fu_gp, xtest, xtest)))
    return l2h
Code example #4
def test_cbf2_gp(dynamic_models):
    learned_model, true_model, xtest, utest = dynamic_models
    true_cbf2 = RadialCBFRelDegree2(true_model)
    learned_cbf2 = RadialCBFRelDegree2(learned_model)
    cbc2 = cbc2_gp(learned_cbf2.cbf,
                   learned_cbf2.grad_cbf,
                   learned_model,
                   utest,
                   k_α=learned_cbf2.k_alpha)
    assert to_numpy(cbc2.mean(xtest)) == pytest.approx(
        to_numpy(-true_cbf2.A(xtest) @ utest + true_cbf2.b(xtest)),
        rel=0.1,
        abs=0.1)
    cbc2.knl(xtest, xtest)
Code example #5
def test_lie2_gp(dynamic_models):
    learned_model, true_model, xtest, utest = dynamic_models
    true_cbf2 = RadialCBFRelDegree2(true_model)
    cbf2 = RadialCBFRelDegree2(learned_model)

    f_gp = learned_model.f_func_gp()
    fu_gp = learned_model.fu_func_gp(utest)
    L2h = GradientGP(DeterministicGP(
        cbf2.grad_cbf, shape=xtest.shape, name="∇ h(x)").t() @ f_gp,
                     x_shape=xtest.shape).t() @ fu_gp
    assert to_numpy(L2h.mean(xtest)) == pytest.approx(to_numpy(
        true_cbf2.lie2_f_h_col(xtest) +
        true_cbf2.lie_g_lie_f_h_col(xtest) * utest)[0],
                                                      abs=0.1,
                                                      rel=0.4)
    L2h.knl(xtest, xtest)
    return L2h
Code example #6
def test_gradient_f_gp(dynamic_models, skip_test=False, dist=1e-4):
    learned_model, true_model, xtest, utest = dynamic_models
    grad_f = GradientGP(DeterministicGP(
        lambda x: torch.tensor([1., 0.]), shape=(2, ), name="[1, 0]").t()
                        @ learned_model.fu_func_gp(utest),
                        x_shape=(2, ))

    def xdot_func(x):
        return true_model.f_func(x)[0] + (true_model.g_func(x) @ utest)[0]

    with variable_required_grad(xtest):
        true_grad_f = torch.autograd.grad(xdot_func(xtest), xtest)[0]
    if not skip_test:
        assert to_numpy(grad_f.mean(xtest)) == pytest.approx(
            to_numpy(true_grad_f), abs=0.1, rel=0.4)
    grad_f.knl(xtest, xtest)
    return grad_f
Code example #7
File: planner.py  Project: wecacuee/Bayesian_CBF
def _knots(self):
    numSteps = self.numSteps
    x0 = to_numpy(self.x0)
    x_goal = to_numpy(self.x_goal)
    xdiff = (x_goal[:2] - x0[:2])
    desired_theta = np.arctan2(xdiff[1], xdiff[0])
    t_first_step = max(int(numSteps * 0.1), 1)
    t_second_stage = min(int(numSteps * 0.9), numSteps - 1)
    dx = (x_goal - x0) / (t_second_stage - t_first_step)
    t_mid = (t_second_stage + t_first_step) / 2
    x_mid = (x0 + x_goal) / 2
    return np.array(
        [[0, x0[0], x0[1], x0[2]],
         [t_first_step, x0[0], x0[1], desired_theta],
         [t_first_step + 1, x0[0] + dx[0], x0[1] + dx[1], desired_theta],
         [t_mid, x_mid[0], x_mid[1], desired_theta],
         [t_second_stage - 1, x_goal[0] - dx[0], x_goal[1] - dx[1],
          desired_theta],
         [t_second_stage, x_goal[0], x_goal[1], desired_theta],
         [numSteps, x_goal[0], x_goal[1], x_goal[2]]])
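The rows returned by _knots are (t, x, y, θ) waypoints. A minimal sketch of how such knots can be turned into a piecewise-linear reference by interpolating each state column over the knot times (plan_at is a hypothetical helper, not part of PiecewiseLinearPlanner's API):

import numpy as np

knots = np.array([[0.0,   0.0, 0.0, 0.0],
                  [20.0,  0.0, 0.0, 0.8],
                  [180.0, 3.0, 1.0, 0.8],
                  [200.0, 3.0, 1.0, 0.6]])  # made-up waypoint values

def plan_at(t, knots):
    times = knots[:, 0]
    return np.array([np.interp(t, times, knots[:, j])
                     for j in range(1, knots.shape[1])])

x_ref = plan_at(42.0, knots)  # interpolated [x, y, theta] at step 42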
Code example #8
def test_convert_cbc_terms_to_socp_term(m=2, extravars=2):
    bfe = torch.rand((m, ))
    e = torch.rand(1)
    V_hom = random_psd(m + 1)
    V = V_hom[1:, 1:]
    bfv = V_hom[1:, 0] * 2
    v = V_hom[0, 0]
    u = torch.rand((m, ))
    A, bfb, bfc, d = SOCPController.convert_cbc_terms_to_socp_terms(
        bfe, e, V, bfv, v, extravars, testing=True)
    y_u = torch.cat((torch.zeros(extravars), u))
    std_rhs = (A @ y_u + bfb).norm()
    mean_rhs = bfc @ y_u + d
    std_lhs = torch.sqrt(u.T @ V @ u + bfv @ u + v)
    mean_lhs = bfe @ u + e
    assert to_numpy(mean_lhs) == pytest.approx(to_numpy(mean_rhs),
                                               abs=1e-4,
                                               rel=1e-2)
    assert to_numpy(std_lhs) == pytest.approx(to_numpy(std_rhs),
                                              abs=1e-4,
                                              rel=1e-2)
    assert to_numpy(mean_lhs + std_lhs) == pytest.approx(to_numpy(mean_rhs +
                                                                  std_rhs),
                                                         abs=1e-4,
                                                         rel=1e-2)
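random_psd is not shown in this snippet; a hypothetical stand-in that returns a random symmetric positive semi-definite matrix is all the test needs from it. With it, the test checks that the affine mean term bfe @ u + e and the standard-deviation term sqrt(uᵀ V u + bfv @ u + v) are preserved by the rewrite into the SOCP form ‖A y + b‖ and c @ y + d over y = [slack; u].

import torch

def random_psd(n):
    # A @ A.T is symmetric positive semi-definite for any real A
    A = torch.rand(n, n)
    return A @ A.t()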
Code example #9
def test_gradient_gp(dynamic_models,
                     skip_test=False,
                     dist=1e-4,
                     grad_check=True):
    learned_model, true_model, xtest, _ = dynamic_models
    if grad_check:
        learned_model.double_()
        func = lambda lm, X: lm.f_func_knl(X, xtest.double())[0, 0]
        with variable_required_grad(xtest):
            torch.autograd.gradcheck(partial(func, learned_model),
                                     xtest.double())
        learned_model.float_()
    l1h = test_affine_gp(dynamic_models, skip_test=True)
    true_cbf2 = RadialCBFRelDegree2(true_model)
    grad_l1h = GradientGP(l1h, x_shape=xtest.shape)
    if not skip_test:
        assert to_numpy(grad_l1h.mean(xtest)) == pytest.approx(to_numpy(
            true_cbf2.grad_lie_f_cbf(xtest)),
                                                               abs=0.1,
                                                               rel=0.4)
    grad_l1h.knl(xtest, xtest)
    return grad_l1h, l1h
Code example #10
def test_gradient_simple():
    m = torch.rand(2)
    lengthscale = torch.rand(2)
    simp_gp = SimpleGP(m, lengthscale)
    grad_simp_gp = GradientGP(simp_gp, x_shape=(2, ), analytical_hessian=True)
    xtest = torch.rand(2)
    assert to_numpy(simp_gp.grad_mean(xtest)) == pytest.approx(
        to_numpy(grad_simp_gp.mean(xtest)))
    assert to_numpy(simp_gp.knl_hessian(xtest, xtest)) == pytest.approx(
        to_numpy(grad_simp_gp.knl(xtest, xtest)))
    xtestp = torch.rand(2)
    assert to_numpy(simp_gp.knl_hessian(xtest,
                                        xtestp)) == pytest.approx(to_numpy(
                                            grad_simp_gp.knl(xtest, xtestp)),
                                                                  rel=1e-3,
                                                                  abs=1e-5)
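The kernel of GradientGP is compared against an analytic Hessian here. Assuming an RBF base kernel k(x, x') = exp(-0.5 * Σ_i (x_i - x'_i)² / ℓ_i²), the cross Hessian ∂²k/∂x∂x' at x = x' equals diag(1/ℓ²); the following self-contained autograd check confirms that identity (rbf is a hypothetical helper, not SimpleGP):

import torch

def rbf(x, y, ell):
    return torch.exp(-0.5 * ((x - y) / ell).pow(2).sum())

ell = torch.rand(2) + 0.5
x = torch.rand(2)

def joint(z):  # k as a function of the stacked vector [x; y]
    return rbf(z[:2], z[2:], ell)

H = torch.autograd.functional.hessian(joint, torch.cat([x, x]))
cross = H[:2, 2:]  # the ∂²k/∂x∂x' block, evaluated at x = x'
assert torch.allclose(cross, torch.diag(1.0 / ell**2), atol=1e-4)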
Code example #11
def test_GP_train_predict(n=2,
                          m=3,
                          D=50,
                          deterministic=False,
                          rel_tol=0.10,
                          abs_tol=0.80,
                          perturb_scale=0.1,
                          sample_generator=sample_generator_trajectory,
                          dynamics_model_class=RandomDynamicsModel,
                          training_iter=100,
                          grad_predict=False):
    if grad_predict:
        deterministic = True
    chosen_seed = torch.randint(100000, (1, ))
    #chosen_seed = 52648
    print("Random seed: {}".format(chosen_seed))
    torch.manual_seed(chosen_seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    # Collect training data
    dynamics_model = dynamics_model_class(m, n, deterministic=deterministic)
    Xdot, X, U = sample_generator(dynamics_model, D)
    if X.shape[-1] == 2 and U.shape[-1] == 1:
        plot_results(torch.arange(U.shape[0]),
                     omega_vec=X[:-1, 0],
                     theta_vec=X[:-1, 1],
                     u_vec=U[:, 0])

    # Test train split
    shuffled_order = np.arange(D)
    #shuffled_order = torch.randint(D, size=(D,))
    np.random.shuffle(shuffled_order)
    shuffled_order = torch.from_numpy(shuffled_order)
    train_indices = shuffled_order[:int(D * 0.8)]
    test_indices = shuffled_order[int(D * 0.8):]

    # Train data
    Xtrain, Utrain, XdotTrain = [Mat[train_indices, :] for Mat in (X, U, Xdot)]
    UHtrain = torch.cat((Utrain.new_ones((Utrain.shape[0], 1)), Utrain), dim=1)
    # Test data
    Xtest, Utest, XdotTest = [Mat[test_indices, :] for Mat in (X, U, Xdot)]

    # Call the training routine
    dgp = ControlAffineRegressor(Xtrain.shape[-1], Utrain.shape[-1])
    dgp_exact = ControlAffineRegressorExact(Xtrain.shape[-1], Utrain.shape[-1])
    dgp_vector = ControlAffineRegressorVector(Xtrain.shape[-1],
                                              Utrain.shape[-1])
    # Test prior
    _ = dgp.predict(Xtest, return_cov=False)
    dgp._fit_with_warnings(Xtrain,
                           Utrain,
                           XdotTrain,
                           training_iter=training_iter,
                           lr=0.01)
    _, _ = dgp_exact.custom_predict(Xtest, compute_cov=False)
    dgp_exact._fit_with_warnings(Xtrain,
                                 Utrain,
                                 XdotTrain,
                                 training_iter=training_iter,
                                 lr=0.01)
    _, _ = dgp_vector.custom_predict(Xtest, compute_cov=False)
    dgp_vector._fit_with_warnings(Xtrain,
                                  Utrain,
                                  XdotTrain,
                                  training_iter=training_iter,
                                  lr=0.01)
    if X.shape[-1] == 2 and U.shape[-1] == 1 and PLOTTING:
        plot_learned_2D_func(Xtrain.detach().cpu().numpy(), dgp.f_func,
                             dynamics_model.f_func)
        plt.savefig('/tmp/f_learned_vs_f_true.pdf')
        plot_learned_2D_func(Xtrain.detach().cpu().numpy(),
                             dgp.g_func,
                             dynamics_model.g_func,
                             axtitle="g(x)[{i}]")
        plt.savefig('/tmp/g_learned_vs_g_true.pdf')

    # check predicting training values
    #FXT_train_mean, FXT_train_cov = dgp.predict(Xtrain)
    #XdotGot_train = XdotTrain.new_empty(XdotTrain.shape)
    #for i in range(Xtrain.shape[0]):
    #    XdotGot_train[i, :] = FXT_train_mean[i, :, :].T @ UHtrain[i, :]
    predict_flatten_deprecated = True
    if not predict_flatten_deprecated:
        XdotGot_train_mean, XdotGot_train_cov = dgp._predict_flatten(
            Xtrain[:-1], Utrain[:-1])
        assert XdotGot_train_mean.detach().cpu().numpy() == pytest.approx(
            XdotTrain[:-1].detach().cpu().numpy(), rel=rel_tol,
            abs=abs_tol), """
            Train data check using original flatten predict """

    UHtest = torch.cat((Utest.new_ones((Utest.shape[0], 1)), Utest), dim=1)
    if deterministic:
        FXTexpected = torch.empty((Xtest.shape[0], 1 + m, n))
        for i in range(Xtest.shape[0]):
            FXTexpected[i, ...] = torch.cat((dynamics_model.f_func(
                Xtest[i, :])[None, :], dynamics_model.g_func(Xtest[i, :]).T),
                                            dim=0)
            assert torch.allclose(XdotTest[i, :],
                                  FXTexpected[i, :, :].T @ UHtest[i, :])

    # check predicting train values
    XdotTrain_mean = dgp.fu_func_mean(Utrain[:-1], Xtrain[:-1])
    XdotTrain_mean_exact = dgp_exact.fu_func_mean(Utrain[:-1], Xtrain[:-1])
    XdotTrain_mean_vector = dgp_vector.fu_func_mean(Utrain[:-1], Xtrain[:-1])
    assert XdotTrain_mean.detach().cpu().numpy() == pytest.approx(
        XdotTrain[:-1].detach().cpu().numpy(), rel=rel_tol, abs=abs_tol), """
        Train data check using custom flatten predict """
    assert XdotTrain_mean_exact.detach().cpu().numpy() == pytest.approx(
        XdotTrain[:-1].detach().cpu().numpy(), rel=rel_tol, abs=abs_tol), """
        Train data check using ControlAffineRegressorExact.custom_predict """
    assert XdotTrain_mean_vector.detach().cpu().numpy() == pytest.approx(
        XdotTrain[:-1].detach().cpu().numpy(), rel=rel_tol, abs=abs_tol), """
        Train data check using ControlAffineRegressorVector.custom_predict """

    if grad_predict and n == 1:
        x0 = Xtrain[9:10, :].detach().clone()
        u0 = Utrain[9:10, :].detach().clone()
        #est_grad_fx = dgp.grad_fu_func_mean(x0, u0)
        true_fu_func = lambda X: dynamics_model.f_func(
            X) + dynamics_model.g_func(X).bmm(u0.unsqueeze(-1)).squeeze(-1)
        with variable_required_grad(x0):
            true_grad_fx = torch.autograd.grad(true_fu_func(x0), x0)[0]
        with variable_required_grad(x0):
            est_grad_fx_2 = torch.autograd.grad(dgp.fu_func_mean(u0, x0),
                                                x0)[0]
        assert to_numpy(est_grad_fx_2) == pytest.approx(to_numpy(true_grad_fx),
                                                        rel=rel_tol,
                                                        abs=abs_tol)
        #assert to_numpy(est_grad_fx) == pytest.approx(to_numpy(true_grad_fx), rel=rel_tol, abs=abs_tol)

    # Check predicting perturbed train values
    Xptrain = Xtrain[:-1] * (
        1 + torch.rand(Xtrain.shape[0] - 1, 1) * perturb_scale)
    Uptrain = Utrain[:-1] * (
        1 + torch.rand(Xtrain.shape[0] - 1, 1) * perturb_scale)
    Xdot_ptrain = dynamics_model.f_func(Xptrain) + dynamics_model.g_func(
        Xptrain).bmm(Uptrain.unsqueeze(-1)).squeeze(-1)
    if not predict_flatten_deprecated:
        XdotGot_ptrain_mean, XdotGot_ptrain_cov = dgp._predict_flatten(
            Xptrain, Uptrain)
        assert XdotGot_ptrain_mean.detach().cpu().numpy() == pytest.approx(
            Xdot_ptrain.detach().cpu().numpy(), rel=rel_tol, abs=abs_tol), """
            Perturbed Train data check using original flatten predict """

    XdotGot_ptrain_mean_custom = dgp.fu_func_mean(Uptrain, Xptrain)
    assert XdotGot_ptrain_mean_custom.detach().cpu().numpy() == pytest.approx(
        Xdot_ptrain.detach().cpu().numpy(), rel=rel_tol, abs=abs_tol), """
        Perturbed Train data check using custom flatten predict """

    # check predicting test values
    # FXTmean, FXTcov = dgp.predict(Xtest)
    # XdotGot = XdotTest.new_empty(XdotTest.shape)
    # for i in range(Xtest.shape[0]):
    #     XdotGot[i, :] = FXTmean[i, :, :].T @ UHtest[i, :]
    if not predict_flatten_deprecated:
        XdotGot_mean, XdotGot_cov = dgp.predict_flatten(Xtest, Utest)
        assert XdotGot_mean.detach().cpu().numpy() == pytest.approx(
            XdotTest.detach().cpu().numpy(), rel=rel_tol, abs=abs_tol)
        #abs=XdotGot_cov.flatten().max())

    # check predicting test values
    Xdot_mean = dgp.fu_func_mean(Utest, Xtest)
    Xdot_mean_exact = dgp_exact.fu_func_mean(Utest, Xtest)
    Xdot_mean_vector = dgp_vector.fu_func_mean(Utest, Xtest)
    assert Xdot_mean.detach().cpu().numpy() == pytest.approx(
        XdotTest.detach().cpu().numpy(), rel=rel_tol, abs=abs_tol)
    assert Xdot_mean_exact.detach().cpu().numpy() == pytest.approx(
        XdotTest.detach().cpu().numpy(), rel=rel_tol, abs=abs_tol)
    assert Xdot_mean_vector.detach().cpu().numpy() == pytest.approx(
        XdotTest.detach().cpu().numpy(), rel=rel_tol, abs=abs_tol)
    return dgp, dynamics_model
Code example #12
def plot_pendulum_covariances(theta0=5 * math.pi / 6,
                              omega0=-0.01,
                              tau=0.01,
                              max_train=200,
                              ntest=1,
                              numSteps=1000,
                              mass=1,
                              gravity=10,
                              length=1,
                              logger_class=partial(
                                  TBLogger,
                                  exp_tags=['pendulum_covariances'],
                                  runs_dir='data/runs'),
                              pendulum_dynamics_class=PendulumDynamicsModel):
    logger = logger_class()
    pend_env = pendulum_dynamics_class(m=1,
                                       n=2,
                                       mass=mass,
                                       gravity=gravity,
                                       length=length)
    dX, X, U = sampling_pendulum_data(pend_env,
                                      D=numSteps,
                                      x0=torch.tensor([theta0, omega0]),
                                      dt=tau,
                                      controller=ControlRandom(
                                          mass=mass,
                                          gravity=gravity,
                                          length=length).control,
                                      plot_every_n_steps=numSteps)

    learned_models = {}
    shuffled_order = np.arange(X.shape[0] - 1)

    # Test train split
    np.random.shuffle(shuffled_order)
    shuffled_order_t = torch.from_numpy(shuffled_order)

    train_indices = shuffled_order_t[:max_train]
    Xtrain = X[train_indices, :]
    Utrain = U[train_indices, :]
    XdotTrain = dX[train_indices, :]

    Xtest = X[shuffled_order_t[-ntest:], :]

    lm_matrix = ControlAffineRegressorExact(Xtrain.shape[-1], Utrain.shape[-1])
    lm_matrix.fit(Xtrain, Utrain, XdotTrain, training_iter=50)
    meanFX, A, BkXX = lm_matrix._custom_predict_matrix(Xtest,
                                                       Xtest,
                                                       compute_cov=True)
    fig, ax = plt.subplots(1, 2, squeeze=False)
    ax[0, 0].set_title('Var[f(x)]')
    plot_covariance(ax[0, 0], to_numpy(BkXX[0, 0, 0, 0] * A))
    ax[0, 1].set_title('Var[g(x)]')
    plot_covariance(ax[0, 1], to_numpy(BkXX[0, 0, 1, 1] * A))
    # ax[0, 2].set_title('cov[f(x), g(x)]')
    # plot_covariance(ax[0, 2], to_numpy(BkXX[0, 0, 0, 1] * A))

    lm_vector = ControlAffineRegressorVector(Xtrain.shape[-1],
                                             Utrain.shape[-1])
    lm_vector.fit(Xtrain, Utrain, XdotTrain, training_iter=50)
    meanFX, KkXX = lm_vector._custom_predict_matrix(Xtest,
                                                    Xtest,
                                                    compute_cov=True)
    plt.savefig('MVGP_covariances.pdf')

    fig, ax = plt.subplots(1, 2, squeeze=False)
    ax[0, 0].set_title('Var[f(x)]')
    plot_covariance(ax[0, 0], to_numpy(KkXX[0, 0, :2, :2]))
    ax[0, 1].set_title('Var[g(x)]')
    plot_covariance(ax[0, 1], to_numpy(KkXX[0, 0, 2:, 2:]))
    # ax[1, 2].set_title('cov[f(x), g(x)]')
    # plot_covariance(ax[1, 2], to_numpy(KkXX[0, 0, :2, 2:]))

    plt.savefig('Corregionalization_covariances.pdf')
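plot_covariance is not defined in this snippet; a hypothetical stand-in that renders a covariance block as a heat map would be enough to reproduce the figures:

import matplotlib.pyplot as plt

def plot_covariance(ax, cov):
    # draw the covariance matrix as an image with a colorbar
    im = ax.imshow(cov, cmap='viridis')
    ax.figure.colorbar(im, ax=ax, fraction=0.046)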
Code example #13
def unicycle_plot_covariances_exp(
    max_train=200,  # testing GPU
    state_start=[-3, -1, -math.pi / 4],
    state_goal=[0, 0, math.pi / 4],
    numSteps=512,
    dt=0.01,
    true_dynamics_gen=partial(AckermannDrive, L=1.0),
    mean_dynamics_gen=partial(AckermannDrive, L=12.0),
    logger_class=partial(TBLogger,
                         exp_tags=['unicycle_plot_covariances'],
                         runs_dir='data/runs'),
    exps=dict(matrix=dict(regressor_class=partial(
        LearnedShiftInvariantDynamics,
        learned_dynamics_class=ControlAffineRegressorExact)),
              vector=dict(regressor_class=partial(
                  LearnedShiftInvariantDynamics,
                  learned_dynamics_class=ControlAffineRegressorVector))),
):
    logger = logger_class()
    bayes_cbf_unicycle.TBLOG = logger.summary_writer
    true_dynamics_model = true_dynamics_gen()

    # Generate training data
    Xdot, X, U = sample_generator_trajectory(
        dynamics_model=true_dynamics_model,
        D=numSteps,
        controller=ControllerCLF(NoPlanner(torch.tensor(state_goal)),
                                 coordinate_converter=lambda x, x_g: x,
                                 dynamics=CartesianDynamics(),
                                 clf=CLFCartesian()).control,
        visualizer=VisualizerZ(),
        x0=state_start,
        dt=dt)

    # Log training data
    for t, (dx, x, u) in enumerate(zip(Xdot, X, U)):
        logger.add_tensors("traj", dict(dx=dx, x=x, u=u), t)

    shuffled_order = np.arange(X.shape[0] - 1)

    dgp = dict()
    # Test train split
    np.random.shuffle(shuffled_order)
    shuffled_order_t = torch.from_numpy(shuffled_order)
    train_indices = shuffled_order_t[:max_train]
    Xtrain = X[train_indices, :]
    Utrain = U[train_indices, :]
    XdotTrain = Xdot[train_indices, :]

    slice_range = []
    for i, num in enumerate([1, 1, 20]):
        x_range = slice(*list(
            map(float, (Xtrain[:, i].min(), Xtrain[:, i].max(),
                        (Xtrain[:, i].max() - Xtrain[:, i].min()) / num))))
        slice_range.append(x_range)
    xtest_grid = np.mgrid[tuple(slice_range)]
    Xtest = torch.from_numpy(xtest_grid.reshape(-1, Xtrain.shape[-1])).to(
        dtype=Xtrain.dtype, device=Xtrain.device)
    # b, n, 1+m
    FX_true = true_dynamics_model.F_func(Xtest).transpose(-2, -1)

    for name, kw in exps.items():
        model = kw['regressor_class'](dt=dt,
                                      mean_dynamics=mean_dynamics_gen(),
                                      max_train=max_train)
        model.fit(Xtrain, Utrain, XdotTrain, training_iter=50)
        # b(1+m)n
        FX_learned, var_FX = model.custom_predict_fullmat(
            Xtest.reshape(-1, Xtest.shape[-1]))
        b = Xtest.shape[0]
        n = true_dynamics_model.state_size
        m = true_dynamics_model.ctrl_size
        var_FX_t = var_FX.reshape(b, (1 + m) * n, b, (1 + m) * n)
        var_FX_diag_t = torch.empty((b, (1 + m) * n, (1 + m) * n),
                                    dtype=var_FX_t.dtype,
                                    device=var_FX_t.device)
        for i in range(b):
            var_FX_diag_t[i, :, :] = var_FX_t[i, :, i, :]

        # log FX_learned and var_FX_diag_t
        logger.add_tensors(name, dict(var_FX_diag_t=to_numpy(var_FX_diag_t)),
                           max_train)

    # Find the latest edited event file from log dir
    events_file = max(glob.glob(
        osp.join(logger.experiment_logs_dir, "*.tfevents*")),
                      key=lambda f: os.stat(f).st_mtime)
    return events_file