Code Example #1
def NG_dr1(X, verbosity=0):
    """
    X: array of N points on Gr(n, p); N x n x p array
    aim to represent X by X_hat (N points on Gr(n-1, p)) 
    where X_hat_i = A^T X_i, A \in St(n, n-1)
    minimizing the projection error (using projection F-norm)
    """
    N, n, p = X.shape
    cpx = np.iscomplex(X).any() # true if X is complex-valued

    if cpx:
        man = Product([ComplexGrassmann(n, 1), Euclidean(p, 2)])
        
    else:
        man = Product([Grassmann(n, 1), Euclidean(p)])
    
    X_ = torch.from_numpy(X)
    
    @pymanopt.function.PyTorch
    def cost(v, b):
        vvT = torch.matmul(v, v.conj().t()) # n x n
        if cpx:
            b_ = b[:, 0] + b[:, 1] * 1j
            b_ = torch.unsqueeze(b_, dim=1)
        else:
            b_ = torch.unsqueeze(b, dim=1)
        vbt = torch.matmul(v, b_.t()) # n x p
        IvvT = torch.eye(n, dtype=X_.dtype) - vvT
        d2 = 0
        for i in range(N):
            d2 = d2 + dist_proj(X_[i], torch.matmul(IvvT, X_[i]) + vbt)**2/N
        return d2
    
    solver = SteepestDescent()
    problem = Problem(manifold=man, cost=cost, verbosity=verbosity)
    theta = solver.solve(problem)
    v = theta[0]
    b_ = theta[1]
    
    if cpx:
        b = b_[:,0] + b_[:,1]*1j
        b = np.expand_dims(b, axis=1)
    else:
        b = np.expand_dims(b_, axis=1)
    
    R = ortho_complement(v)
    tmp = np.array([R.conj().T for i in range(N)])
    X_low = multiprod(tmp, X)
    X_low = np.array([qr(X_low[i])[0] for i in range(N)])

    return X_low, R, v, b
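
NG_dr1 (and NG_dr below) depend on three helpers that are not shown: multiprod (batched matrix multiplication, shipped by older pymanopt releases as pymanopt.tools.multi.multiprod), ortho_complement, and dist_proj. A minimal sketch of dist_proj, assuming it implements the projection Frobenius-norm distance named in the docstrings (the 1/sqrt(2) factor is one common normalization convention):

import torch

def dist_proj(X, Y):
    """Projection F-norm distance between span(X) and span(Y).

    X, Y: n x p matrices, real or complex. Y need not be orthonormal,
    so both arguments are re-orthonormalized via QR before their
    orthogonal projectors are compared.
    """
    Qx, _ = torch.linalg.qr(X)
    Qy, _ = torch.linalg.qr(Y)
    Px = Qx @ Qx.conj().transpose(-2, -1)
    Py = Qy @ Qy.conj().transpose(-2, -1)
    return torch.linalg.matrix_norm(Px - Py) / (2 ** 0.5)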
Code Example #2
def NG_dr(X, m, verbosity=0, *args, **kwargs):
    """
    X: array of N points on Gr(n, p); N x n x p array
    aim to represent X by X_hat (N points on Gr(m, p), m < n) 
    where X_hat_i = R^T X_i, R \in St(n, m)
    minimizing the projection error (using projection F-norm)
    """
    N, n, p = X.shape
    cpx = np.iscomplex(X).any() # true if X is complex-valued

    if cpx:
        man = Product([ComplexGrassmann(n, m), Euclidean(n, p, 2)])
        
    else:
        man = Product([Grassmann(n, m), Euclidean(n, p)])
    
    X_ = torch.from_numpy(X)
    
    @pymanopt.function.PyTorch
    def cost(A, B):
        AAT = torch.matmul(A, A.conj().t()) # n x n
        if cpx:
            B_ = B[:,:,0] + B[:,:,1]*1j
        else:
            B_ = B
        IAATB = torch.matmul(torch.eye(n, dtype=X_.dtype) - AAT, B_) # n x p
        d2 = 0
        for i in range(N):
            d2 = d2 + dist_proj(X_[i], torch.matmul(AAT, X_[i]) + IAATB)**2/N
            #d2 = d2 + dist_proj(X_[i], torch.matmul(AAT, X_[i]))**2/N
        return d2

    #solver = ConjugateGradient()
    solver = SteepestDescent()
    problem = Problem(manifold=man, cost=cost, verbosity=verbosity)
    theta = solver.solve(problem)
    A = theta[0]
    B = theta[1]
    
    if cpx:
        B_ = B[:,:,0] + B[:,:,1]*1j
    else:
        B_ = B

    tmp = np.array([A.conj().T for i in range(N)])
    X_low = multiprod(tmp, X)
    X_low = np.array([qr(X_low[i])[0] for i in range(N)])

    return X_low, A, B_
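
A hypothetical call, assuming NG_dr and its dependencies (numpy, torch, pymanopt, and the helpers sketched above) are in scope:

import numpy as np
from numpy.linalg import qr

# 50 random points on Gr(10, 3), each stored as an orthonormal 10 x 3 basis.
X = np.array([qr(np.random.randn(10, 3))[0] for _ in range(50)])

X_low, A, B = NG_dr(X, m=5)
print(X_low.shape)  # (50, 5, 3): the same points represented on Gr(5, 3)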
Code Example #3
File: test_product.py  Project: pymanopt/pymanopt
    def setUp(self):
        self.m = m = 100
        self.n = n = 50
        self.euclidean = Euclidean(m, n)
        self.sphere = Sphere(n)
        self.manifold = Product([self.euclidean, self.sphere])

        point = self.manifold.random_point()

        @pymanopt.function.autograd(self.manifold)
        def cost(*x):
            return np.sum([np.linalg.norm(a - b)**2 for a, b in zip(x, point)])

        self.cost = cost
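
The fixture's cost is minimized exactly at the sampled point, so it can be exercised directly; a minimal sketch, assuming the pymanopt 2.x API used above:

import autograd.numpy as np
import pymanopt
from pymanopt.manifolds import Euclidean, Product, Sphere
from pymanopt.optimizers import SteepestDescent

manifold = Product([Euclidean(100, 50), Sphere(50)])
target = manifold.random_point()

@pymanopt.function.autograd(manifold)
def cost(*x):
    return np.sum([np.linalg.norm(a - b) ** 2 for a, b in zip(x, target)])

result = SteepestDescent(verbosity=0).run(pymanopt.Problem(manifold, cost))
# result.point should approximately recover `target`, the unique minimizer.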
Code Example #4
def run(backend=SUPPORTED_BACKENDS[0], quiet=True):
    num_samples, num_weights = 200, 3

    solver = TrustRegions()
    manifold = Euclidean(3)

    for k in range(5):
        samples = rnd.randn(num_samples, num_weights)
        targets = rnd.randn(num_samples)

        cost, egrad, ehess = create_cost_egrad_ehess(backend, samples, targets)
        problem = pymanopt.Problem(manifold,
                                   cost,
                                   egrad=egrad,
                                   ehess=ehess,
                                   verbosity=0)

        estimated_weights = solver.solve(problem)
        if not quiet:
            print("Run {}".format(k + 1))
            print("Weights found by pymanopt (top) / "
                  "closed form solution (bottom)")
            print(estimated_weights)
            print(la.pinv(samples) @ targets)
            print("")
Code Example #5
def run(backend=SUPPORTED_BACKENDS[0], quiet=True):
    num_samples, num_weights = 200, 3

    optimizer = TrustRegions(verbosity=0)
    manifold = Euclidean(3)

    for k in range(5):
        samples = np.random.normal(size=(num_samples, num_weights))
        targets = np.random.normal(size=num_samples)

        (
            cost,
            euclidean_gradient,
            euclidean_hessian,
        ) = create_cost_and_derivates(manifold, samples, targets, backend)
        problem = pymanopt.Problem(
            manifold,
            cost,
            euclidean_gradient=euclidean_gradient,
            euclidean_hessian=euclidean_hessian,
        )

        estimated_weights = optimizer.run(problem).point
        if not quiet:
            print(f"Run {k + 1}")
            print(
                "Weights found by pymanopt (top) / "
                "closed form solution (bottom)"
            )
            print(estimated_weights)
            print(np.linalg.pinv(samples) @ targets)
            print("")
Code Example #6
    def setUp(self):
        self.m = m = 20
        self.n = n = 10
        self.rank = rank = 3

        A = np.random.normal(size=(m, n))
        self.manifold = Product([FixedRankEmbedded(m, n, rank), Euclidean(n)])

        @pymanopt.function.autograd(self.manifold)
        def cost(u, s, vt, x):
            return np.linalg.norm(((u * s) @ vt - A) @ x) ** 2

        self.cost = cost
        self.gradient = self.cost.get_gradient_operator()
        self.hessian = self.cost.get_hessian_operator()

        self.problem = pymanopt.Problem(self.manifold, self.cost)
Code Example #7
    def setUp(self):
        self.m = m = 20
        self.n = n = 10
        self.rank = rank = 3

        A = np.random.randn(m, n)

        @pymanopt.function.Autograd
        def cost(u, s, vt, x):
            return np.linalg.norm(((u * s) @ vt - A) @ x)**2

        self.cost = cost
        self.gradient = self.cost.compute_gradient()
        self.hvp = self.cost.compute_hessian_vector_product()

        self.manifold = Product([FixedRankEmbedded(m, n, rank), Euclidean(n)])
        self.problem = pymanopt.Problem(self.manifold, self.cost)
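
Example #7 is Example #6 written against the older decorator API, which suggests the following mapping (inferred from these two snippets alone):

# pymanopt <= 0.2.x                       pymanopt >= 2.x
# @pymanopt.function.Autograd             @pymanopt.function.autograd(manifold)
# cost.compute_gradient()                 cost.get_gradient_operator()
# cost.compute_hessian_vector_product()   cost.get_hessian_operator()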
Code Example #8
    def fit(self, T, Y, init, maxIter=100):
        self.init_fit(T, Y, None)

        D = self.D + self.L
        K = self.K

        # (1) Instantiate the manifold
        manifold = Product([PositiveDefinite(D + 1, k=K), Euclidean(K - 1)])

        # (2) Define the cost function
        cost = self.get_cost_function(T, Y)

        problem = Problem(manifold=manifold, cost=cost, verbosity=1)

        # (3) Instantiate a Pymanopt solver
        solver = SteepestDescent(maxiter=3 * maxIter)

        # let Pymanopt do the rest
        Xopt = solver.solve(problem)
        self.Xopt_to_theta(Xopt)
Code Example #9
    def evaluate(self):
        # Perform reconstruction
        if self.recon_type == 'tp':  # Estimate theta and phi
            manifold = Sphere(3)

            def cost(X, data=self.data):
                ll = self.multiframe.noise_model.loglikelihood(
                    util.xyz2tp(*X), data)
                return -ll

            problem = Problem(manifold=manifold, cost=cost, verbosity=0)
            start_pts = [
                np.array(util.tp2xyz(*x)) for x in util.sphere_profile(20)
            ]
            solver = ParticleSwarm(maxcostevals=200)
            Xopt = solver.solve(problem, x=start_pts)
            self.estimated_fluorophore = fluorophore.Fluorophore(*util.xyz2tp(
                *Xopt))
        elif self.recon_type == 'tpc':  # Estimate theta, phi, constant
            # Create manifold and cost function
            manifold = Product((Sphere(3), Euclidean(1)))

            def cost(X, data=self.data):
                estimate = np.hstack([util.xyz2tp(*X[0]), X[1]])
                ll = self.multiframe.noise_model.loglikelihood(estimate, data)
                return -ll

            problem = Problem(manifold=manifold, cost=cost, verbosity=0)

            # Generate start_pts and format
            xyz_start_pts = 3 * [
                np.array(util.tp2xyz(*x)) for x in util.sphere_profile(10)
            ]
            c_start_pts = np.expand_dims(np.hstack(
                (10 * [0.1], 10 * [2], 10 * [10])),
                                         axis=1)
            start_pts = np.hstack((xyz_start_pts, c_start_pts))
            pts = []
            for start_pt in start_pts:
                pts.append([np.array(start_pt[0:3]), np.array(start_pt[3:5])])

            # Solve
            solver = ParticleSwarm(maxcostevals=250)
            Xopt = solver.solve(problem, x=pts)

            self.estimated_fluorophore = fluorophore.Fluorophore(
                *np.hstack([util.xyz2tp(*Xopt[0]), Xopt[1]]).flatten())

        elif self.recon_type == 'tpck':  # Estimate theta, phi, constant, kappa
            # Create manifold and cost function
            manifold = Product((Sphere(3), Euclidean(2)))

            def cost(X, data=self.data):
                estimate = np.array([
                    util.xyz2tp(*X[0]), X[1]
                ]).flatten()  # Reshape data for loglikelihood function
                ll = self.multiframe.noise_model.loglikelihood(estimate, data)
                print(estimate, ll)
                return -ll

            problem = Problem(manifold=manifold, cost=cost, verbosity=0)

            # Generate start_pts and format
            xyz_start_pts = 3 * [
                np.array(util.tp2xyz(*x)) for x in util.sphere_profile(10)
            ]
            k_start_pts = np.expand_dims(np.hstack(
                (10 * [-100], 10 * [0], 10 * [100])),
                                         axis=1)
            c_start_pts = np.expand_dims(np.hstack(
                (10 * [0.1], 10 * [1], 10 * [10])),
                                         axis=1)
            start_pts = np.hstack((xyz_start_pts, c_start_pts, k_start_pts))
            pts = []
            for start_pt in start_pts:
                pts.append([np.array(start_pt[0:3]), np.array(start_pt[3:5])])

            # Solve
            solver = ParticleSwarm(maxcostevals=200)
            Xopt = solver.solve(problem, x=pts)

            self.estimated_fluorophore = fluorophore.Fluorophore(
                *np.array([util.xyz2tp(*Xopt[0]), Xopt[1]]).flatten())
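
Example #9 assumes a util module with spherical-coordinate helpers. A plausible sketch of the two conversions it calls (an assumption, not the original module):

import numpy as np

def xyz2tp(x, y, z):
    """Cartesian unit vector -> spherical angles (theta, phi)."""
    return np.arccos(z), np.arctan2(y, x)

def tp2xyz(theta, phi):
    """Spherical angles (theta, phi) -> Cartesian unit vector."""
    return (np.sin(theta) * np.cos(phi),
            np.sin(theta) * np.sin(phi),
            np.cos(theta))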
Code Example #10
    
    with tf.name_scope("test") as scope:
      correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
      accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
      accuracy_summary =tf.summary.scalar("accuracy", accuracy)

    #with tf.name_scope("e_test") as scope:
        #e_correct_prediction = tf.equal(tf.argmax(e_y, 1), tf.argmax(y_, 1))
        #e_accuracy = tf.reduce_mean(tf.cast(e_correct_prediction, "float"))
        #e_accuracy_summary =tf.summary.scalar("e_accuracy", e_accuracy)

    summaries = tf.summary.merge_all()



    manifold_b = Euclidean(1, 10)
    if use_parameterization:
        manifold_A = Euclidean(784, k)
        manifold_B = Euclidean(k, 10)
        manifold_W = Product([manifold_A, manifold_B])
        arg = [A, B, b]
    else:
        manifold_W = FixedRankEmbedded(784, 10, k)
        #manifold_W = Product([Stiefel(784, k), Euclidean(k), Stiefel(k, 10)])
        arg = [A, M, B, b]

    manifold = Product([manifold_W, manifold_b])

    e_manifold_W = Euclidean(784, 10)
    e_arg = [eW, b]
Code Example #11
def main():
    """Main function to run nonlinear manifold optimization on SE(3) to estimate
    an optimal relative pose transformation between coordinate frames given by
    the different lidar sensors."""
    # Extract and process the CSVs
    main_odometry = relative_pose_processing.process_df(MAIN_ODOM_CSV)
    front_odometry = relative_pose_processing.process_df(FRONT_ODOM_CSV)
    rear_odometry = relative_pose_processing.process_df(REAR_ODOM_CSV)

    # Process poses
    (main_aligned, front_aligned,
     rear_aligned) = relative_pose_processing.align_df(
         [main_odometry, front_odometry, rear_odometry])

    # Get ICP covariance matrices
    # Main lidar odometry
    main_icp, main_trans_cov, main_trans_cov_max, \
        main_trans_cov_avg, main_rot_cov, main_rot_cov_max, \
        main_rot_cov_avg, main_reject = parse_icp_cov(main_odometry, type="main",
                                                      reject_thr=REJECT_THR)

    # Front lidar odometry
    front_icp, front_trans_cov, front_trans_cov_max, \
        front_trans_cov_avg, front_rot_cov, front_rot_cov_max, \
        front_rot_cov_avg, front_reject = parse_icp_cov(front_odometry, type="front",
                                                        reject_thr=REJECT_THR)

    # Rear lidar odometry
    rear_icp, rear_trans_cov, rear_trans_cov_max, \
        rear_trans_cov_avg, rear_rot_cov, rear_rot_cov_max, \
        rear_rot_cov_avg, rear_reject = parse_icp_cov(rear_odometry, type="rear",
                                                      reject_thr=REJECT_THR)

    # Calculate relative poses
    (main_aligned,
     main_rel_poses) = relative_pose_processing.calc_rel_poses(main_aligned)
    (front_aligned,
     front_rel_poses) = relative_pose_processing.calc_rel_poses(front_aligned)
    (rear_aligned,
     rear_rel_poses) = relative_pose_processing.calc_rel_poses(rear_aligned)

    cov_t_main, cov_R_main = compute_weights_euler(main_aligned)
    cov_t_front, cov_R_front = compute_weights_euler(front_aligned)
    cov_t_rear, cov_R_rear = compute_weights_euler(rear_aligned)

    # Extract a single scalar from rotation and translation using the maximum value
    var_t_main = extract_variance(cov_t_main, mode="max")
    var_R_main = extract_variance(cov_R_main, mode="max")
    var_t_front = extract_variance(cov_t_front, mode="max")
    var_R_front = extract_variance(cov_R_front, mode="max")
    var_t_rear = extract_variance(cov_t_rear, mode="max")
    var_R_rear = extract_variance(cov_R_rear, mode="max")

    # Optimization (1) Instantiate a manifold
    translation_manifold = Euclidean(3)  # Translation vector
    so3 = Rotations(3)  # Rotation matrix
    manifold = Product((so3, translation_manifold))  # Instantiate manifold

    # Get initial guesses for our estimations
    initial_poses = {}
    if os.path.exists(PKL_POSES_PATH):  # Check to make sure path exists
        transforms_dict = load_transforms(
            PKL_POSES_PATH)  # Loads relative transforms

    # Now get initial guesses from the relative poses
    initial_guess_main_front = transforms_dict[
        "velodyne_front"]  # Relative transform from main to front (T^{V}_{F})
    initial_guess_main_rear = transforms_dict[
        "velodyne_rear"]  # Relative transform from main to rear (T^{V}_{B})
    initial_guess_front_rear = np.linalg.inv(
        initial_guess_main_front
    ) @ initial_guess_main_rear  # Relative transform from front to rear (T^{F}_{B})
    direct_initial_guess_front_rear = transforms_dict[
        "direct_front_rear"]  # Transform computed directly from front to rear

    # Print out all the initial estimates as poses
    print(
        "INITIAL GUESS MAIN FRONT: \n {} \n".format(initial_guess_main_front))
    print("INITIAL GUESS MAIN REAR: \n {} \n".format(initial_guess_main_rear))
    print(
        "INITIAL GUESS FRONT REAR: \n {} \n".format(initial_guess_front_rear))
    print("INITIAL GUESS DIRECT FRONT REAR: \n {} \n".format(
        direct_initial_guess_front_rear))

    # Get rotation matrices and translation vectors for the initial guesses
    R0_main_front = initial_guess_main_front[:3, :3]
    t0_main_front = initial_guess_main_front[:3, 3]
    X0_main_front = (R0_main_front, t0_main_front)
    print("INITIAL GUESS MAIN FRONT: \n R0: \n {} \n\n t0: \n {} \n".format(
        R0_main_front, t0_main_front))

    R0_main_rear = initial_guess_main_rear[:3, :3]
    t0_main_rear = initial_guess_main_rear[:3, 3]
    X0_main_rear = (R0_main_rear, t0_main_rear)
    print("INITIAL GUESS MAIN REAR: \n R0: \n {} \n\n t0: \n {} \n".format(
        R0_main_rear, t0_main_rear))

    R0_front_rear = initial_guess_front_rear[:3, :3]
    t0_front_rear = initial_guess_front_rear[:3, 3]
    X0_front_rear = (R0_front_rear, t0_front_rear)
    print("INITIAL GUESS FRONT REAR: \n R0: \n {} \n\n t0: \n {} \n".format(
        R0_front_rear, t0_front_rear))

    ######################## MAIN FRONT CALIBRATION ################################
    # Carry out optimization for main-front homogeneous transformations
    ### PARAMETERS ###
    A = np.array(front_rel_poses)  # First set of poses
    B = np.array(main_rel_poses)  # Second set of poses
    N = min(A.shape[0], B.shape[0])
    r = np.logical_or(np.array(main_reject[:N]), np.array(
        front_reject[:N]))  # If either has high variance, reject the sample
    omega = np.max([var_R_main,
                    var_R_front])  # Take the max across the two odometries
    rho = np.max([var_t_main,
                  var_t_front])  # Take the max across the two odometries
    ### PARAMETERS ###

    cost_main_front = lambda x: cost(x, A, B, r, rho, omega, WEIGHTED)
    problem_main_front = Problem(
        manifold=manifold, cost=cost_main_front
    )  # (2a) Compute the optimization between main and front
    solver_main_front = CustomSteepestDescent(
    )  # (3) Instantiate a Pymanopt solver
    Xopt_main_front = solver_main_front.solve(problem_main_front,
                                              x=X0_main_front)
    print("Initial Guess for Main-Front Transformation: \n {}".format(
        initial_guess_main_front))
    print("Optimal solution between main and front reference frames: \n {}".
          format(Xopt_main_front))

    # Take intermediate values for plotting
    estimates_x_main_front = solver_main_front.estimates
    errors_main_front = solver_main_front.errors
    iters_main_front = solver_main_front.iterations

    # Metrics dictionary
    estimates_dict_main_front = {
        i: T
        for i, T in zip(iters_main_front, estimates_x_main_front)
    }
    error_dict_main_front = {
        i: e
        for i, e in zip(iters_main_front, errors_main_front)
    }

    # Save intermediate results to a pkl file
    estimates_fname_main_front = os.path.join(ANALYSIS_RESULTS_PATH,
                                              "estimates_main_front.pkl")
    error_fname_main_front = os.path.join(ANALYSIS_RESULTS_PATH,
                                          "error_main_front.pkl")

    # Save estimates to pickle file
    with open(estimates_fname_main_front, "wb") as pkl_estimates:
        pickle.dump(estimates_dict_main_front, pkl_estimates)

    # Save error to pickle file
    with open(error_fname_main_front, "wb") as pkl_error:
        pickle.dump(error_dict_main_front, pkl_error)

    # Calculate difference between initial guess and final
    XOpt_T_main_front = construct_pose(Xopt_main_front[0],
                                       Xopt_main_front[1].reshape((3, 1)))
    print("DIFFERENCE IN MATRICES: \n {}".format(
        np.subtract(XOpt_T_main_front, initial_guess_main_front)))

    # Compute the weighted and unweighted RMSE
    rmse_init_weighted, rmse_final_weighted, rmse_init_R_weighted, \
    rmse_init_t_weighted, rmse_final_R_weighted, \
    rmse_final_t_weighted = compute_rmse_weighted(initial_guess_main_front,
                                                  XOpt_T_main_front, A, B, rho,
                                                  omega)
    rmse_init_unweighted, rmse_final_unweighted, rmse_init_R_unweighted, \
    rmse_init_t_unweighted, rmse_final_R_unweighted, \
    rmse_final_t_unweighted = compute_rmse_unweighted(initial_guess_main_front,
                                                      XOpt_T_main_front, A, B)
    rmses = [
        rmse_init_unweighted, rmse_final_unweighted, rmse_init_weighted,
        rmse_final_weighted, rmse_init_R_unweighted, rmse_init_t_unweighted,
        rmse_final_R_unweighted, rmse_final_t_unweighted, rmse_init_R_weighted,
        rmse_init_t_weighted, rmse_final_R_weighted, rmse_final_t_weighted
    ]

    # Display and save RMSEs
    outpath = os.path.join(ANALYSIS_RESULTS_PATH, "main_front_rmse.txt")
    display_and_save_rmse(rmses, outpath)

    # Save final estimates
    final_estimate_outpath = os.path.join(FINAL_ESTIMATES_PATH,
                                          "main_front_final.txt")
    np.savetxt(final_estimate_outpath, XOpt_T_main_front)
    ################################################################################

    ######################## MAIN REAR CALIBRATION #################################
    ### PARAMETERS ###
    A = np.array(rear_rel_poses)  # First set of poses
    B = np.array(main_rel_poses)  # Second set of poses
    N = min(A.shape[0], B.shape[0])
    r = np.logical_or(np.array(main_reject[:N]), np.array(
        rear_reject[:N]))  # If either has high variance, reject the sample
    omega = np.max([var_R_main,
                    var_R_rear])  # Take the max across the two odometries
    rho = np.max([var_t_main,
                  var_t_rear])  # Take the max across the two odometries
    ### PARAMETERS ###

    cost_main_rear = lambda x: cost(x, A, B, r, rho, omega, WEIGHTED)
    # Carry out optimization for main-rear homogeneous transformations
    problem_main_rear = Problem(
        manifold=manifold, cost=cost_main_rear
    )  # (2a) Compute the optimization between main and rear
    solver_main_rear = CustomSteepestDescent(
    )  # (3) Instantiate a Pymanopt solver
    Xopt_main_rear = solver_main_rear.solve(problem_main_rear, x=X0_main_rear)
    print("Initial Guess for Main-Rear Transformation: \n {}".format(
        initial_guess_main_rear))
    print("Optimal solution between main and rear reference frames: \n {}".
          format(Xopt_main_rear))

    # Take intermediate values for plotting
    estimates_x_main_rear = solver_main_rear.estimates
    errors_main_rear = solver_main_rear.errors
    iters_main_rear = solver_main_rear.iterations

    # Metrics dictionary
    estimates_dict_main_rear = {
        i: T
        for i, T in zip(iters_main_rear, estimates_x_main_rear)
    }
    error_dict_main_rear = {
        i: e
        for i, e in zip(iters_main_rear, errors_main_rear)
    }

    # Save intermediate results to a pkl file
    estimates_fname_main_rear = os.path.join(ANALYSIS_RESULTS_PATH,
                                             "estimates_main_rear.pkl")
    error_fname_main_rear = os.path.join(ANALYSIS_RESULTS_PATH,
                                         "error_main_rear.pkl")

    # Save estimates to pickle file
    with open(estimates_fname_main_rear, "wb") as pkl_estimates:
        pickle.dump(estimates_dict_main_rear, pkl_estimates)

    # Save error to pickle file
    with open(error_fname_main_rear, "wb") as pkl_error:
        pickle.dump(error_dict_main_rear, pkl_error)

    # Calculate difference between initial guess and final
    XOpt_T_main_rear = construct_pose(Xopt_main_rear[0],
                                      Xopt_main_rear[1].reshape((3, 1)))
    print("DIFFERENCE IN MATRICES: \n {}".format(
        np.subtract(XOpt_T_main_rear, initial_guess_main_rear)))

    # Compute the weighted and unweighted RMSE
    rmse_init_weighted, rmse_final_weighted, rmse_init_R_weighted, \
    rmse_init_t_weighted, rmse_final_R_weighted, \
    rmse_final_t_weighted = compute_rmse_weighted(initial_guess_main_rear,
                                                  XOpt_T_main_rear, A, B, rho,
                                                  omega)
    rmse_init_unweighted, rmse_final_unweighted, rmse_init_R_unweighted, \
    rmse_init_t_unweighted, rmse_final_R_unweighted, \
    rmse_final_t_unweighted = compute_rmse_unweighted(initial_guess_main_rear,
                                                      XOpt_T_main_rear, A, B)
    rmses = [
        rmse_init_unweighted, rmse_final_unweighted, rmse_init_weighted,
        rmse_final_weighted, rmse_init_R_unweighted, rmse_init_t_unweighted,
        rmse_final_R_unweighted, rmse_final_t_unweighted, rmse_init_R_weighted,
        rmse_init_t_weighted, rmse_final_R_weighted, rmse_final_t_weighted
    ]

    # Display and save RMSEs
    outpath = os.path.join(ANALYSIS_RESULTS_PATH, "main_rear_rmse.txt")
    display_and_save_rmse(rmses, outpath)

    # Save final estimates
    final_estimate_outpath = os.path.join(FINAL_ESTIMATES_PATH,
                                          "main_rear_final.txt")
    np.savetxt(final_estimate_outpath, XOpt_T_main_rear)
    ################################################################################

    ######################## FRONT REAR CALIBRATION ################################
    ### PARAMETERS ###
    A = np.array(rear_rel_poses)  # First set of poses
    B = np.array(front_rel_poses)  # Second set of poses
    N = min(A.shape[0], B.shape[0])
    r = np.logical_or(np.array(front_reject[:N]), np.array(
        rear_reject[:N]))  # If either has high variance, reject the sample
    omega = np.max([var_R_front,
                    var_R_rear])  # Take the max across the two odometries
    rho = np.max([var_t_front,
                  var_t_rear])  # Take the max across the two odometries
    ### PARAMETERS ###

    cost_front_rear = lambda x: cost(x, A, B, r, rho, omega, WEIGHTED)
    # Carry out optimization for front-rear homogeneous transformations
    problem_front_rear = Problem(
        manifold=manifold, cost=cost_front_rear
    )  # (2a) Compute the optimization between front and rear
    solver_front_rear = CustomSteepestDescent(
    )  # (3) Instantiate a Pymanopt solver
    Xopt_front_rear = solver_front_rear.solve(problem_front_rear,
                                              x=X0_front_rear)
    print("Initial Guess for Front-Rear Transformation: \n {}".format(
        initial_guess_front_rear))
    print("Optimal solution between front and rear reference frames: \n {}".
          format(Xopt_front_rear))

    # Take intermediate values for plotting
    estimates_x_front_rear = solver_front_rear.estimates
    errors_front_rear = solver_front_rear.errors
    iters_front_rear = solver_front_rear.iterations

    # Metrics dictionary
    estimates_dict_front_rear = {
        i: T
        for i, T in zip(iters_front_rear, estimates_x_front_rear)
    }
    error_dict_front_rear = {
        i: e
        for i, e in zip(iters_front_rear, errors_front_rear)
    }

    # Save intermediate results to a pkl file
    estimates_fname_front_rear = os.path.join(ANALYSIS_RESULTS_PATH,
                                              "estimates_front_rear.pkl")
    error_fname_front_rear = os.path.join(ANALYSIS_RESULTS_PATH,
                                          "error_front_rear.pkl")

    # Save estimates to pickle file
    with open(estimates_fname_front_rear, "wb") as pkl_estimates:
        pickle.dump(estimates_dict_front_rear, pkl_estimates)

    # Save error to pickle file
    with open(error_fname_front_rear, "wb") as pkl_error:
        pickle.dump(error_dict_front_rear, pkl_error)

    # Calculate difference between initial guess and final
    XOpt_T_front_rear = construct_pose(Xopt_front_rear[0],
                                       Xopt_front_rear[1].reshape((3, 1)))
    print("DIFFERENCE IN MATRICES: \n {}".format(
        np.subtract(XOpt_T_front_rear, initial_guess_front_rear)))

    # Compute the weighted and unweighted RMSE
    rmse_init_weighted, rmse_final_weighted, rmse_init_R_weighted, \
    rmse_init_t_weighted, rmse_final_R_weighted, \
    rmse_final_t_weighted = compute_rmse_weighted(initial_guess_front_rear,
                                                  XOpt_T_front_rear, A, B, rho,
                                                  omega)
    rmse_init_unweighted, rmse_final_unweighted, rmse_init_R_unweighted, \
    rmse_init_t_unweighted, rmse_final_R_unweighted, \
    rmse_final_t_unweighted = compute_rmse_unweighted(initial_guess_front_rear,
                                                      XOpt_T_front_rear, A, B)
    rmses = [
        rmse_init_unweighted, rmse_final_unweighted, rmse_init_weighted,
        rmse_final_weighted, rmse_init_R_unweighted, rmse_init_t_unweighted,
        rmse_final_R_unweighted, rmse_final_t_unweighted, rmse_init_R_weighted,
        rmse_init_t_weighted, rmse_final_R_weighted, rmse_final_t_weighted
    ]

    # Display and save RMSEs
    outpath = os.path.join(ANALYSIS_RESULTS_PATH, "front_rear_rmse.txt")
    display_and_save_rmse(rmses, outpath)

    # Save final estimates
    final_estimate_outpath = os.path.join(FINAL_ESTIMATES_PATH,
                                          "front_rear_final.txt")
    np.savetxt(final_estimate_outpath, XOpt_T_front_rear)
    ################################################################################

    # Display all results
    print("_________________________________________________________")
    print("_____________________ALL RESULTS_________________________")
    print("_________________________________________________________")
    print("Initial Guess for Main-Front Transformation: \n {}".format(
        initial_guess_main_front))
    print("Optimal solution between main and front reference frames: \n {}".
          format(Xopt_main_front))
    print("_________________________________________________________")
    print("Initial Guess for Main-Rear Transformation: \n {}".format(
        initial_guess_main_rear))
    print("Optimal solution between main and rear reference frames: \n {}".
          format(Xopt_main_rear))
    print("_________________________________________________________")
    print("Initial Guess for Front-Rear Transformation: \n {}".format(
        initial_guess_front_rear))
    print("Optimal solution between front and rear reference frames: \n {}".
          format(Xopt_front_rear))
    print("_________________________________________________________")
Code Example #12
def fit_gpytorch_manifold(
    mll: MarginalLogLikelihood,
    bounds: Optional[ParameterBounds] = None,
    solver: Solver = pyman_solvers.ConjugateGradient(maxiter=500),
    nb_init_candidates: int = 200,
    last_x_as_candidate_prob: float = 0.9,
    options: Optional[Dict[str, Any]] = None,
    track_iterations: bool = True,
    approx_mll: bool = False,
    module_to_array_func: TModToArray = module_to_list_of_array,
    module_from_array_func: TArrayToMod = set_params_with_list_of_array,
) -> Tuple[MarginalLogLikelihood, Dict[str, Union[
        float, List[OptimizationIteration]]]]:
    """
    This function fits a gpytorch model by maximizing MLL with a pymanopt optimizer.

    The model and likelihood in mll must already be in train mode.
    This method requires that the model has `train_inputs` and `train_targets`.

    Parameters
    ----------
    :param mll: MarginalLogLikelihood to be maximized.

    Optional parameters
    -------------------
    :param nb_init_candidates: number of random initial candidates for the GP parameters
    :param last_x_as_candidate_prob: probability that the last set of parameters is among the initial candidates
    :param bounds: A dictionary mapping parameter names to tuples of lower and upper bounds.
    :param solver: Pymanopt solver.
    :param options: Dictionary of solver options, passed along to scipy.minimize.
    :param track_iterations: Track the function values and wall time for each iteration.
    :param approx_mll: If True, use gpytorch's approximate MLL computation. This is disabled by default since the
        stochasticity is an issue for deterministic optimizers. Enabling this is only recommended when working with
        large training data sets (n > 2000).

    Returns
    -------
    :return: 2-element tuple containing
        - MarginalLogLikelihood with parameters optimized in-place.
        - Dictionary with the following key/values:
            "fopt": Best mll value.
            "wall_time": Wall time of fitting.
            "iterations": List of OptimizationIteration objects with information on each iteration.
                If track_iterations is False, will be empty.

    Example:
    gp = SingleTaskGP(train_X, train_Y)
    mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
    mll.train()
    fit_gpytorch_manifold(mll)
    mll.eval()
    """
    options = options or {}
    # Current parameters
    x0, property_dict, bounds = module_to_array_func(module=mll,
                                                     bounds=bounds,
                                                     exclude=options.pop(
                                                         "exclude", None))
    x0 = [x0i.astype(np.float64) for x0i in x0]
    if bounds is not None:
        warnings.warn(
            'Bounds handling not supported yet in fit_gpytorch_manifold')
        # bounds = Bounds(lb=bounds[0], ub=bounds[1], keep_feasible=True)

    t1 = time.time()

    # Define cost function
    def cost(x):
        param_dict = OrderedDict(mll.named_parameters())
        idx = 0
        for p_name, attrs in property_dict.items():
            # Construct the new tensor
            if len(attrs.shape) == 0:  # deal with scalar tensors
                # new_data = torch.tensor(x[0], dtype=attrs.dtype, device=attrs.device)
                new_data = torch.tensor(x[idx][0],
                                        dtype=attrs.dtype,
                                        device=attrs.device)
            else:
                # new_data = torch.tensor(x, dtype=attrs.dtype, device=attrs.device).view(*attrs.shape)
                new_data = torch.tensor(x[idx],
                                        dtype=attrs.dtype,
                                        device=attrs.device).view(*attrs.shape)
            param_dict[p_name].data = new_data
            idx += 1
        # mllx = set_params_with_array(mll, x, property_dict)
        train_inputs, train_targets = mll.model.train_inputs, mll.model.train_targets
        mll.zero_grad()
        output = mll.model(*train_inputs)
        args = [output, train_targets] + _get_extra_mll_args(mll)
        loss = -mll(*args).sum()
        return loss

    def egrad(x):
        loss = cost(x)
        loss.backward()
        param_dict = OrderedDict(mll.named_parameters())
        grad = []
        for p_name in property_dict:
            t = param_dict[p_name].grad
            if t is None:
                # Parameters that do not affect the loss get a zero
                # (Euclidean) gradient of the appropriate shape.
                grad.append(np.zeros(property_dict[p_name].shape))
            else:
                if t.ndim > 1 and t.shape[
                        0] > 1:  # if the variable is a matrix, keep its shape
                    grad.append(t.detach().cpu().double().clone().numpy())
                else:  # Vector case
                    grad.append(
                        t.detach().view(-1).cpu().double().clone().numpy())
        return grad

    # Define the manifold (product of manifolds)
    manifolds_list = []
    for p_name, t in mll.named_parameters():
        try:
            # If a manifold is given add it
            manifolds_list.append(attrgetter(p_name + "_manifold")(mll))
        except AttributeError:
            # Otherwise, default: Euclidean
            manifolds_list.append(
                Euclidean(int(np.prod(property_dict[p_name].shape))))
    # Product of manifolds
    manifold = Product(manifolds_list)

    # Instantiate the problem on the manifold
    if track_iterations:
        verbosity = 2
    else:
        verbosity = 0

    problem = Problem(manifold=manifold,
                      cost=cost,
                      egrad=egrad,
                      verbosity=verbosity,
                      arg=torch.Tensor())  #, precon=precon)

    # For cases where the Hessian is hard/long to compute, we approximate it with finite differences of the gradient.
    # Typical cases: the Hessian can be hard to compute due to the 2nd derivative of the eigenvalue decomposition,
    # e.g. in the SPD affine-invariant distance.
    problem._hess = types.MethodType(get_hessianfd, problem)

    # Choose initial parameters
    # Do not always consider x0, to encourage variations of the parameters.
    if np.random.rand() < last_x_as_candidate_prob:
        x0_candidates = [x0]
        x0_candidates += [
            manifold.rand() for i in range(nb_init_candidates - 1)
        ]
    else:
        x0_candidates = []
        x0_candidates += [manifold.rand() for i in range(nb_init_candidates)]
    for i in range(int(3 * nb_init_candidates / 4)):
        x0_candidates[i][0:4] = x0[0:4]  #TODO remove hard-coding
    y0_candidates = [cost(x0_candidates[i]) for i in range(nb_init_candidates)]

    y_init, x_init_idx = torch.Tensor(y0_candidates).min(0)
    x_init = x0_candidates[x_init_idx]

    with gpt_settings.fast_computations(log_prob=approx_mll):
        # Set the solver's log verbosity to 1
        solver._logverbosity = 1
        # Solve
        opt_x, opt_log = solver.solve(problem, x=x_init)

    # Construct info dict
    info_dict = {
        "fopt": float(cost(opt_x).detach().numpy()),
        "wall_time": time.time() - t1,
        "opt_log": opt_log,
    }
    # if not res.success:  # TODO update
    #     try:
    #         # Some res.message are bytes
    #         msg = res.message.decode("ascii")
    #     except AttributeError:
    #         # Others are str
    #         msg = res.message
    #     warnings.warn(
    #         f"Fitting failed with the optimizer reporting '{msg}'", OptimizationWarning
    #     )
    # Set to optimum
    mll = module_from_array_func(mll, opt_x, property_dict)
    return mll, info_dict
Code Example #13
File: sssrm.py  Project: vincehass/brainiak
    def _update_classifier(self, data, labels, w, classes):
        """Update the classifier parameters theta and bias

        Parameters
        ----------

        data : list of 2D arrays, element i has shape=[voxels_i, samples_i]
            Each element in the list contains the fMRI data of one subject for
            the classification task.

        labels : list of arrays of int, element i has shape=[samples_i]
            Each element in the list contains the labels for the data samples
            in data_sup.

        w : list of 2D array, element i has shape=[voxels_i, features]
            The orthogonal transforms (mappings) :math:`W_i` for each subject.

        classes : int
            The number of classes in the classifier.


        Returns
        -------

        theta : array, shape=[features, classes]
            The MLR parameter for the class planes.

        bias : array shape=[classes,]
            The MLR parameter for class biases.
        """

        # Stack the data and labels for training the classifier
        data_stacked, labels_stacked, weights = \
            SSSRM._stack_list(data, labels, w)

        features = w[0].shape[1]
        total_samples = weights.size

        data_th = S.shared(data_stacked.astype(theano.config.floatX))
        val_ = S.shared(labels_stacked)
        total_samples_S = S.shared(total_samples)
        theta_th = T.matrix(name='theta', dtype=theano.config.floatX)
        bias_th = T.col(name='bias', dtype=theano.config.floatX)
        constf2 = S.shared(self.alpha / self.gamma, allow_downcast=True)
        weights_th = S.shared(weights)

        log_p_y_given_x = \
            T.log(T.nnet.softmax((theta_th.T.dot(data_th.T)).T + bias_th.T))
        f = -constf2 * T.sum(
            (log_p_y_given_x[T.arange(total_samples_S), val_]) /
            weights_th) + 0.5 * T.sum(theta_th**2)

        manifold = Product((Euclidean(features,
                                      classes), Euclidean(classes, 1)))
        problem = Problem(manifold=manifold,
                          cost=f,
                          arg=[theta_th, bias_th],
                          verbosity=0)
        solver = ConjugateGradient(mingradnorm=1e-6)
        solution = solver.solve(problem)
        theta = solution[0]
        bias = solution[1]

        del constf2
        del theta_th
        del bias_th
        del data_th
        del val_
        del solver
        del solution

        return theta, bias
Code Example #14
File: test_euclidean.py  Project: agoel00/MBAweb
    def setUp(self):
        self.m = m = 10
        self.n = n = 5
        self.man = Euclidean(m, n)
Code Example #15
import numpy as np
import torch

from pymanopt import Problem
from pymanopt.manifolds import Euclidean
from pymanopt.solvers import TrustRegions

if __name__ == "__main__":
    # Cost function is the squared reconstruction error
    X = torch.from_numpy(np.zeros((200, 3)))
    y = torch.from_numpy(np.zeros((200, 1)))  # same shape as X.matmul(w)

    def cost(w):
        return (y - X.matmul(w)).pow(2).sum()

    # A solver that involves the hessian
    solver = TrustRegions()

    # R^3
    manifold = Euclidean(3, 1)

    # Create the problem with extra cost function arguments
    problem = Problem(manifold=manifold,
                      cost=cost,
                      verbosity=0,
                      arg=torch.Tensor())

    # Solve 5 instances of the same type of problem for different data input
    for k in range(0, 5):
        # Generate random data
        X = torch.from_numpy(np.random.randn(200, 3))
        y = torch.from_numpy(np.random.randn(200, 1))

        wopt = solver.solve(problem)
        print('Run {}'.format(k + 1))
Code Example #16
class TestProductManifold(unittest.TestCase):
    def setUp(self):
        self.m = m = 100
        self.n = n = 50
        self.euclidean = Euclidean(m, n)
        self.sphere = Sphere(n)
        self.man = Product([self.euclidean, self.sphere])

    def test_dim(self):
        np_testing.assert_equal(self.man.dim, self.m*self.n+self.n-1)

    def test_typicaldist(self):
        np_testing.assert_equal(self.man.typicaldist,
                                np.sqrt((self.m*self.n)+np.pi**2))

    def test_dist(self):
        X = self.man.rand()
        Y = self.man.rand()
        np_testing.assert_equal(self.man.dist(X, Y),
                                np.sqrt(
                                    self.euclidean.dist(X[0], Y[0])**2 +
                                    self.sphere.dist(X[1], Y[1])**2))

    # def test_inner(self):

    # def test_proj(self):

    # def test_ehess2rhess(self):

    # def test_retr(self):

    # def test_egrad2rgrad(self):

    # def test_norm(self):

    # def test_rand(self):

    # def test_randvec(self):

    # def test_transp(self):

    def test_exp_log_inverse(self):
        s = self.man
        X = s.rand()
        Y = s.rand()
        Yexplog = s.exp(X, s.log(X, Y))
        np_testing.assert_almost_equal(s.dist(Y, Yexplog), 0)

    def test_log_exp_inverse(self):
        s = self.man
        X = s.rand()
        U = s.randvec(X)
        Ulogexp = s.log(X, s.exp(X, U))
        np_testing.assert_array_almost_equal(U[0], Ulogexp[0])
        np_testing.assert_array_almost_equal(U[1], Ulogexp[1])

    def test_pairmean(self):
        s = self.man
        X = s.rand()
        Y = s.rand()
        Z = s.pairmean(X, Y)
        np_testing.assert_array_almost_equal(s.dist(X, Z), s.dist(Y, Z))
Code Example #17
    def setUp(self):
        self.m = m = 100
        self.n = n = 50
        self.euclidean = Euclidean(m, n)
        self.sphere = Sphere(n)
        self.man = Product([self.euclidean, self.sphere])
Code Example #18
    def manifold_fit(self, Y, X):
        from pymanopt.manifolds import Rotations, Euclidean, Product
        from pymanopt import Problem
        from pymanopt.solvers import TrustRegions
        # from minimal_varx import make_normalized_G

        cov_res, cov_xlag, cov_y_xlag = calc_extended_covariance(Y, X, self.p)
        self.set_covs(cov_res, cov_xlag)
        # m = X.shape[0]

        with_c = (self.mm_degree > self.agg_rnk) and (self.m - self.agg_rnk)
        C = Euclidean(self.mm_degree - self.agg_rnk, self.m - self.agg_rnk)
        RG = Rotations(self.m)
        if with_c:
            raise ValueError(
                "This option is implemented only for self.agg_rnk == m")
        if with_c:
            manifold = Product([RG, C])
        else:
            manifold = RG
        if not with_c:
            c_null = np.zeros(
                (self.mm_degree - self.agg_rnk, self.m - self.agg_rnk))
        else:
            c_null = None

        if with_c:

            def cost(x):
                o, c = x
                G = make_normalized_G(self, self.m, o, c)
                self.calc_states(G)
                return self.neg_log_llk
        else:

            def cost(x):
                G = make_normalized_G(self, self.m, x, c_null)
                self.calc_states(G)
                return self.neg_log_llk

        if with_c:

            def egrad(x):
                o, c = x
                grad_o, grad_c = self.map_o_c_grad(self._gradient_tensor, o, c)
                return [grad_o, grad_c]
        else:

            def egrad(x):
                grad_o, grad_c = self.map_o_c_grad(self._gradient_tensor, x,
                                                   c_null)

                return grad_o

        if with_c:

            def ehess(x, Heta):
                o, c = x
                eta = make_normalized_G(self, self.m, Heta[0], Heta[1])
                hess_raw = self.hessian_prod(eta)
                hess_o, hess_c = self.map_o_c_grad(hess_raw, o, c)
                return [hess_o, hess_c]

        else:

            def ehess(x, Heta):
                eta = make_normalized_G(self, self.m, Heta, c_null)
                hess_raw = self.hessian_prod(eta)
                hess_o, hess_c = self.map_o_c_grad(hess_raw, x, c_null)
                return hess_o

        min_mle = Problem(manifold, cost, egrad=egrad, ehess=ehess)

        solver = TrustRegions()
        opt = solver.solve(min_mle)
        if with_c:
            G_opt = make_normalized_G(self, self.m, opt[0], opt[1])
        else:
            G_opt = make_normalized_G(self, self.m, opt, c_null)

        self.calc_H_F_Phi(G_opt, cov_y_xlag)
        return opt
Code Example #19
# +
import sys


sys.path.insert(0, "../..")

import autograd.numpy as np

from autograd.scipy.special import logsumexp

import pymanopt
from pymanopt import Problem
from pymanopt.manifolds import Euclidean, Product, SymmetricPositiveDefinite
from pymanopt.optimizers import SteepestDescent


# (1) Instantiate the manifold
# (D, K, N, and `samples` are defined earlier in the full example:
# D = data dimension, K = number of components, samples is an N x D array.)
manifold = Product([SymmetricPositiveDefinite(D + 1, k=K), Euclidean(K - 1)])

# (2) Define cost function
# The parameters must be contained in a list theta.
@pymanopt.function.autograd(manifold)
def cost(S, v):
    # Unpack parameters
    nu = np.append(v, 0)

    logdetS = np.expand_dims(np.linalg.slogdet(S)[1], 1)
    y = np.concatenate([samples.T, np.ones((1, N))], axis=0)

    # Calculate log_q
    y = np.expand_dims(y, 0)

    # 'Probability' of y belonging to each cluster
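    # --- The excerpt breaks off here. A sketch (assumed, not verbatim) of
    # how the cost in pymanopt's mixture-of-Gaussians example concludes: ---
    log_q = -0.5 * (np.sum(y * np.linalg.solve(S, y), axis=1) + logdetS)

    # Mixing proportions: softmax of nu
    alpha = np.exp(nu)
    alpha = alpha / np.sum(alpha)
    alpha = np.expand_dims(alpha, 1)

    loglikvec = logsumexp(np.log(alpha) + log_q, axis=0)
    return -np.sum(loglikvec)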
Code Example #20
    def run(self,
            S,
            F,
            v=None,
            C=None,
            fs=None,
            omega=None,
            maxiter=500,
            tol=1e-10,
            variant='bp'):
        '''
        Run MERLiN algorithm.
        Whether to run a scalar variant, i.e. S -> C -> w'F, or a
        timeseries variant, i.e. S -> C -> bp(w'F) is determined by the
        dimensionality of the input F.

        Input (default)
            - S
                (m x 1) np.array that contains the samples of S
            - F
                either a (d x m) np.array that contains the linear mixture
                samples or a (d x m x n) np.array that contains the linearly
                mixed timeseries of length n (d channels, m trials)
            - v
                (d x 1) np.array holding the linear combination that
                extracts middle node C from F
            - C
                (m x 1) np.array that contains the samples of the middle
                node C
            - fs
                sampling rate in Hz
            - omega
                tuple of (low, high) cut-off of desired frequency band
            - maxiter (500)
                maximum iterations to run the optimisation algorithm for
            - tol (1e-10)
                terminate optimisation if step size < tol or grad norm < tol
            - variant ('bp')
                determines which MERLiN variant to use on timeseries data
                ('bp' = MERLiNbp algorithm ([1], Algorithm 4),
                 'bpicoh' = MERLiNbpicoh algorithm ([1], Algorithm 5),
                 'nlbp' = MERLiNnlbp)

        Output
            - w
                linear combination that was found and should extract the
                effect of C from F
            - converged
                boolean that indicates whether the stopping criterion was
                met before the maximum number of iterations was performed
            - curob
                objective function's value at w
        '''
        self._S = S
        self._Forig = F
        self._fs = fs
        self._omega = omega
        self._d = F.shape[0]
        self._m = F.shape[1]

        # scalar or timeseries mode
        if F.ndim == 3:
            self._mode = 'timeseries'
            self._n = F.shape[2]
            if not (fs and omega):
                raise ValueError('Both the optional arguments fs and omega '
                                 'need to be provided.')
            if self._verbosity:
                print('Launching MERLiN' + variant + ' for iid sampled '
                      'timeseries chunks.')
        elif F.ndim == 2:
            self._mode = 'scalar'
            if self._verbosity:
                print('Launching MERLiN for iid sampled scalars.')
        else:
            raise ValueError('F needs to be a 2-dimensional numpy array '
                             '(iid sampled scalars) or a 3-dimensional '
                             'numpy array (iid sampled timeseries chunks).')

        self._prepare(v, C)

        if self._mode == 'scalar':
            problem = self._problem_MERLiN()
        elif variant == 'bp':
            problem = self._problem_MERLiNbp()
        elif variant == 'bpicoh':
            problem = self._problem_MERLiNbpicoh()
        elif variant == 'nlbp':
            problem = self._problem_MERLiNnlbp()
        else:
            raise NotImplementedError

        # Compare strings with == / != rather than `is`, which is
        # unreliable for str literals.
        if variant != 'nlbp':
            problem.manifold = Sphere(self._d, 1)
        else:
            problem.manifold = Product(
                [Sphere(self._d, 1),
                 Euclidean(1, 1),
                 Euclidean(1, 1)])

        # choose best out of ten 10-step runs as initialisation
        solver = SteepestDescent(maxiter=10, logverbosity=1)
        res = [solver.solve(problem) for k in range(0, 10)]
        obs = [-r[1]['final_values']['f(x)'] for r in res]
        w0 = res[obs.index(max(obs))][0]

        solver = SteepestDescent(maxtime=float('inf'),
                                 maxiter=maxiter,
                                 mingradnorm=tol,
                                 minstepsize=tol,
                                 logverbosity=1)
        if self._verbosity:
            print('Running optimisation algorithm.')
        w, info = solver.solve(problem, x=w0)
        if variant == 'nlbp':
            w = w[0]
        converged = maxiter != info['final_values']['iterations']
        curob = -float(info['final_values']['f(x)'])
        if self._verbosity:
            print('DONE.')
        return self._P.T.dot(w), converged, curob
Code Example #21
File: test_euclidean.py  Project: pymanopt/pymanopt
    def setUp(self):
        self.m = m = 10
        self.n = n = 5
        self.manifold = Euclidean(m, n)

        super().setUp()
Code Example #22
def cross_validation(odom_1,
                     aligned_1,
                     odom_2,
                     aligned_2,
                     type_1,
                     type_2,
                     K=10):
    """Function to run cross-validation to run nonlinear optimization for optimal
    pose estimation and evaluation.  Performs cross-validation K times and splits
    the dataset into K (approximately) even splits, to be used for in-sample
    training and out-of-sample evaluation.

    This function estimates a relative transformation between two lidar frames
    using nonlinear optimization, and evaluates the robustness of this estimate
    through K-fold cross-validation performance of our framework.  Though this
    function does not return any values, it saves all results in the
    'results' relative path.

    Parameters:
        odom_1 (pd.DataFrame):  DataFrame corresponding to odometry data for the
            pose we wish to transform into the odom_2 frame of reference.  See
            data/main_odometry.csv for an example of the headers/columns/data
            types this function expects this DataFrame to have.

        aligned_1 (pd.DataFrame): DataFrame corresponding to aligned odometry
            data given the 3 sets of odometry data for the 3 lidar sensors.  This
            data corresponds to the odom_1 sensor frame.

        odom_2 (pd.DataFrame):  DataFrame corresponding to odometry data for the
            pose we wish to transform the odom_1 frame of reference into.  See
            data/main_odometry.csv for an example of the headers/columns/data
            types this function expects this DataFrame to have.

        aligned_2 (pd.DataFrame): DataFrame corresponding to aligned odometry
            data given the 3 sets of odometry data for the 3 lidar sensors.  This
            data corresponds to the odom_2 sensor frame.

        type_1 (str):  String denoting the lidar type.  Should be in the set
            {'main', 'front', 'rear'}.  This type corresponds to the data type
            for the odom_1 frame.

        type_2 (str):  String denoting the lidar type.  Should be in the set
            {'main', 'front', 'rear'}.  This type corresponds to the data type
            for the odom_2 frame.

        K (int):  The number of folds to be used for cross-validation.  Defaults
            to 10.
    """
    # Get ICP covariance matrices
    # Odom 1 lidar odometry
    odom1_icp, odom1_trans_cov, odom1_trans_cov_max, \
    odom1_trans_cov_avg, odom1_rot_cov, odom1_rot_cov_max, \
    odom1_rot_cov_avg, odom1_reject = parse_icp_cov(odom_1, type=type_1,
                                                    reject_thr=REJECT_THR)

    # Odom 2 lidar odometry
    odom2_icp, odom2_trans_cov, odom2_trans_cov_max, \
    odom2_trans_cov_avg, odom2_rot_cov, odom2_rot_cov_max, \
    odom2_rot_cov_avg, odom2_reject = parse_icp_cov(odom_2, type=type_2,
                                                    reject_thr=REJECT_THR)
    # Calculate relative poses
    (odom1_aligned,
     odom1_rel_poses) = relative_pose_processing.calc_rel_poses(aligned_1)
    (odom2_aligned,
     odom2_rel_poses) = relative_pose_processing.calc_rel_poses(aligned_2)

    # Compute weights for weighted estimate
    cov_t_odom1, cov_R_odom1 = compute_weights_euler(odom1_aligned)
    cov_t_odom2, cov_R_odom2 = compute_weights_euler(odom2_aligned)

    # Extract a single scalar using the maximum variance from rotation and translation
    var_t_odom1 = extract_variance(cov_t_odom1, mode="max")
    var_R_odom1 = extract_variance(cov_R_odom1, mode="max")
    var_t_odom2 = extract_variance(cov_t_odom2, mode="max")
    var_R_odom2 = extract_variance(cov_R_odom2, mode="max")

    # Optimization (1) Instantiate a manifold
    translation_manifold = Euclidean(3)  # Translation vector
    so3 = Rotations(3)  # Rotation matrix
    manifold = Product((so3, translation_manifold))  # Instantiate manifold

    # Get initial guesses for our estimations
    if os.path.exists(PKL_POSES_PATH):  # Check to make sure path exists
        transforms_dict = load_transforms(
            PKL_POSES_PATH)  # Relative transforms
    else:  # fail fast instead of hitting a NameError below
        raise FileNotFoundError(
            "No initial relative transforms found at {}".format(PKL_POSES_PATH))

    # Map types to sensor names to access initial estimate relative transforms
    types2sensors = {"main": "velodyne", "front": "front", "rear": "rear"}

    # Now get initial guesses from the relative poses
    initial_guess_odom1_odom2 = transforms_dict["{}_{}".format(
        types2sensors[type_1], types2sensors[type_2])]
    # Print out all the initial estimates as poses
    print("INITIAL GUESS {} {}: \n {} \n".format(types2sensors[type_1],
                                                 types2sensors[type_2],
                                                 initial_guess_odom1_odom2))

    # Get rotation matrices for initial guesses
    R0_odom1_odom2, t0_odom1_odom2 = initial_guess_odom1_odom2[:3, :3], \
                                     initial_guess_odom1_odom2[:3, 3]
    X0_odom1_odom2 = (R0_odom1_odom2, t0_odom1_odom2)  # Pymanopt estimate
    print("INITIAL GUESS {} {}: \n R0: \n {} \n\n t0: \n {} \n".format(
        types2sensors[type_1], types2sensors[type_2], R0_odom1_odom2,
        t0_odom1_odom2))

    # Create KFold xval object to get training/validation indices
    kf = KFold(n_splits=K, random_state=None, shuffle=False)
    k = 0  # Set fold counter to 0

    # Dataset
    A = np.array(odom2_rel_poses)  # First set of poses
    B = np.array(odom1_rel_poses)  # Second set of poses
    N = len(A)
    assert len(A) == len(B)  # Sanity check to ensure odometry data matches
    r = np.logical_or(np.array(odom1_reject)[:N],
                      np.array(odom2_reject)[:N])  # Outlier rejection

    print("NUMBER OF CROSS-VALIDATION FOLDS: {}".format(K))

    # Iterate over the K train/test splits of the poses
    for train_index, test_index in kf.split(
            A):  # Perform K-fold cross-validation

        # Path for results from manifold optimization
        analysis_results_path = os.path.join(ANALYSIS_RESULTS_PATH,
                                             "k={}".format(k))
        final_estimates_path = os.path.join(FINAL_ESTIMATES_PATH,
                                            "k={}".format(k))
        odometry_plots_path = os.path.join(ODOMETRY_PLOTS_PATH,
                                           "k={}".format(k))

        # Make sure all paths exist - if they don't create them
        for path in [
                analysis_results_path, final_estimates_path,
                odometry_plots_path
        ]:
            check_dir(path)

        # Get training data
        A_train = A[train_index]
        B_train = B[train_index]
        N_train = min(A_train.shape[0], B_train.shape[0])
        r_train = r[train_index]
        print("FOLD NUMBER: {}, NUMBER OF TRAINING SAMPLES: {}".format(
            k, N_train))

        omega = np.max([var_R_odom1, var_R_odom2
                        ])  # Take maximum across different odometries
        rho = np.max([var_t_odom1,
                      var_t_odom2])  # Take maximum across different odometries

        cost_lambda = lambda x: cost(x, A_train, B_train, r_train, rho, omega,
                                     WEIGHTED)  # Create cost function
        problem = Problem(manifold=manifold,
                          cost=cost_lambda)  # Create problem
        solver = CustomSteepestDescent()  # Create custom solver
        X_opt = solver.solve(problem, x=X0_odom1_odom2)  # Solve problem
        print("Initial Guess for Main-Front Transformation: \n {}".format(
            initial_guess_odom1_odom2))
        print("Optimal solution between {} and {} "
              "reference frames: \n {}".format(types2sensors[type_1],
                                               types2sensors[type_2], X_opt))

        # Take intermediate values for plotting
        estimates_x = solver.estimates
        errors = solver.errors
        iters = solver.iterations

        # Metrics dictionary
        estimates_dict = {i: T for i, T in zip(iters, estimates_x)}
        error_dict = {i: e for i, e in zip(iters, errors)}

        # Save intermediate results to a pkl file
        estimates_fname = os.path.join(
            analysis_results_path,
            "estimates_{}_{}.pkl".format(types2sensors[type_1],
                                         types2sensors[type_2]))
        error_fname = os.path.join(
            analysis_results_path,
            "error_{}_{}.pkl".format(types2sensors[type_1],
                                     types2sensors[type_2]))

        # Save estimates to pickle file
        with open(estimates_fname, "wb") as pkl_estimates:
            pickle.dump(estimates_dict, pkl_estimates)

        # Save error to pickle file
        with open(error_fname, "wb") as pkl_error:
            pickle.dump(error_dict, pkl_error)

        # Calculate difference between initial guess and final
        X_opt_T = construct_pose(X_opt[0], X_opt[1].reshape((3, 1)))
        print("DIFFERENCE IN MATRICES: \n {}".format(
            np.subtract(X_opt_T, initial_guess_odom1_odom2)))

        # Compute the weighted RMSE (training/in-sample)
        train_rmse_init_weighted, train_rmse_final_weighted, train_rmse_init_R_weighted, \
        train_rmse_init_t_weighted, train_rmse_final_R_weighted, \
        train_rmse_final_t_weighted = compute_rmse_weighted(
            initial_guess_odom1_odom2, X_opt_T, A_train, B_train, rho, omega)

        # Compute the unweighted RMSE (training/in-sample)
        train_rmse_init_unweighted, train_rmse_final_unweighted, train_rmse_init_R_unweighted, \
        train_rmse_init_t_unweighted, train_rmse_final_R_unweighted, \
        train_rmse_final_t_unweighted = compute_rmse_unweighted(
            initial_guess_odom1_odom2, X_opt_T, A_train, B_train)

        # Concatenate all RMSE values for training/in-sample
        train_rmses = [
            train_rmse_init_unweighted, train_rmse_final_unweighted,
            train_rmse_init_weighted, train_rmse_final_weighted,
            train_rmse_init_R_unweighted, train_rmse_init_t_unweighted,
            train_rmse_final_R_unweighted, train_rmse_final_t_unweighted,
            train_rmse_init_R_weighted, train_rmse_init_t_weighted,
            train_rmse_final_R_weighted, train_rmse_final_t_weighted
        ]

        # Display and save RMSEs
        outpath = os.path.join(
            analysis_results_path,
            "train_rmse_{}_{}.txt".format(types2sensors[type_1],
                                          types2sensors[type_2]))
        display_and_save_rmse(train_rmses, outpath)

        # Get test data
        A_test = A[test_index]
        B_test = B[test_index]
        N_test = min(A_test.shape[0], B_test.shape[0])
        print("NUMBER OF TEST SAMPLES: {}".format(N_test))

        # Compute the weighted RMSE (testing/out-of-sample)
        test_rmse_init_weighted, test_rmse_final_weighted, test_rmse_init_R_weighted, \
        test_rmse_init_t_weighted, test_rmse_final_R_weighted, \
        test_rmse_final_t_weighted = compute_rmse_weighted(initial_guess_odom1_odom2,
                                                            X_opt_T, A_test, B_test, rho, omega)

        # Compute the unweighted RMSE (testing/out-of-sample)
        test_rmse_init_unweighted, test_rmse_final_unweighted, test_rmse_init_R_unweighted, \
        test_rmse_init_t_unweighted, test_rmse_final_R_unweighted, \
        test_rmse_final_t_unweighted = compute_rmse_unweighted(initial_guess_odom1_odom2,
                                                                X_opt_T, A_test, B_test)

        # Concatenate all RMSE values for testing/out-of-sample
        test_rmses = [
            test_rmse_init_unweighted, test_rmse_final_unweighted,
            test_rmse_init_weighted, test_rmse_final_weighted,
            test_rmse_init_R_unweighted, test_rmse_init_t_unweighted,
            test_rmse_final_R_unweighted, test_rmse_final_t_unweighted,
            test_rmse_init_R_weighted, test_rmse_init_t_weighted,
            test_rmse_final_R_weighted, test_rmse_final_t_weighted
        ]

        # Display and save RMSEs
        outpath = os.path.join(
            analysis_results_path,
            "test_rmse_{}_{}.txt".format(types2sensors[type_1],
                                         types2sensors[type_2]))
        display_and_save_rmse(test_rmses, outpath)

        # Save final estimates
        final_estimate_outpath = os.path.join(
            final_estimates_path, "{}_{}.txt".format(types2sensors[type_1],
                                                     types2sensors[type_2]))
        np.savetxt(final_estimate_outpath, X_opt_T)

        # Finally, increment k
        k += 1
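Stripped of the cross-validation and file I/O, the optimisation above is a rotation-plus-translation fit on a product manifold. The self-contained sketch below shows the same setup with a simple point-alignment cost standing in for the snippet's custom weighted relative-pose cost; it assumes the pymanopt 2.x API, where Rotations(3) is called SpecialOrthogonalGroup(3) and optimizers expose run() instead of solve():

import autograd.numpy as anp
import numpy as np
import pymanopt
from pymanopt.manifolds import Euclidean, Product, SpecialOrthogonalGroup
from pymanopt.optimizers import SteepestDescent

manifold = Product([SpecialOrthogonalGroup(3), Euclidean(3)])

# synthetic correspondences q_i = R_true p_i + t_true
P = np.random.randn(100, 3)
R_true, _ = np.linalg.qr(np.random.randn(3, 3))
R_true *= np.sign(np.linalg.det(R_true))  # force det = +1 (a proper rotation)
t_true = np.array([1.0, -2.0, 0.5])
Q = P @ R_true.T + t_true

@pymanopt.function.autograd(manifold)
def cost(R, t):
    # mean squared alignment error of R p_i + t against q_i
    return anp.mean(anp.sum((anp.dot(P, R.T) + t - Q) ** 2, axis=1))

problem = pymanopt.Problem(manifold, cost)
R_hat, t_hat = SteepestDescent(verbosity=0).run(problem).point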
Code example #25
0
File: test_product.py Project: pymanopt/pymanopt
class TestProductManifold(ManifoldTestCase):
    def setUp(self):
        self.m = m = 100
        self.n = n = 50
        self.euclidean = Euclidean(m, n)
        self.sphere = Sphere(n)
        self.manifold = Product([self.euclidean, self.sphere])

        point = self.manifold.random_point()

        @pymanopt.function.autograd(self.manifold)
        def cost(*x):
            return np.sum([np.linalg.norm(a - b)**2 for a, b in zip(x, point)])

        self.cost = cost

    def test_dim(self):
        np_testing.assert_equal(self.manifold.dim,
                                self.m * self.n + self.n - 1)

    def test_typical_dist(self):
        np_testing.assert_equal(self.manifold.typical_dist,
                                np.sqrt((self.m * self.n) + np.pi**2))

    def test_dist(self):
        X = self.manifold.random_point()
        Y = self.manifold.random_point()
        np_testing.assert_equal(
            self.manifold.dist(X, Y),
            np.sqrt(
                self.euclidean.dist(X[0], Y[0])**2 +
                self.sphere.dist(X[1], Y[1])**2),
        )

    def test_tangent_vector_multiplication(self):
        # Regression test for https://github.com/pymanopt/pymanopt/issues/49.
        manifold = Product((Euclidean(12), Grassmann(12, 3)))
        x = manifold.random_point()
        eta = manifold.random_tangent_vector(x)
        np.float64(1.0) * eta

    # def test_inner_product(self):

    # def test_projection(self):

    # def test_euclidean_to_riemannian_hessian(self):

    # def test_retraction(self):

    def test_first_order_function_approximation(self):
        self.run_gradient_approximation_test()

    def test_second_order_function_approximation(self):
        self.run_hessian_approximation_test()

    # def test_norm(self):

    # def test_random_point(self):

    # def test_random_tangent_vector(self):

    # def test_transport(self):

    def test_exp_log_inverse(self):
        s = self.manifold
        X = s.random_point()
        Y = s.random_point()
        Yexplog = s.exp(X, tangent_vector=s.log(X, Y))
        np_testing.assert_almost_equal(s.dist(point_a=Y, point_b=Yexplog), 0)

    def test_log_exp_inverse(self):
        s = self.manifold
        X = s.random_point()
        U = s.random_tangent_vector(X)
        Ulogexp = s.log(X, s.exp(X, U))
        np_testing.assert_array_almost_equal(U[0], Ulogexp[0])
        np_testing.assert_array_almost_equal(U[1], Ulogexp[1])

    def test_pair_mean(self):
        s = self.manifold
        X = s.random_point()
        Y = s.random_point()
        Z = s.pair_mean(X, Y)
        np_testing.assert_array_almost_equal(s.dist(X, Z), s.dist(Y, Z))
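The test_dist case above encodes the defining property of the product metric: the squared distances of the factors add. A quick numerical check of the same identity, assuming the pymanopt 2.x classes exercised by this test:

import numpy as np
from pymanopt.manifolds import Euclidean, Product, Sphere

euclidean, sphere = Euclidean(4, 2), Sphere(5)
manifold = Product([euclidean, sphere])
X, Y = manifold.random_point(), manifold.random_point()
# product distance == root of summed squared factor distances
np.testing.assert_allclose(
    manifold.dist(X, Y),
    np.sqrt(euclidean.dist(X[0], Y[0])**2 + sphere.dist(X[1], Y[1])**2))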
Code example #26
0
class TestProductManifold(unittest.TestCase):
    def setUp(self):
        self.m = m = 100
        self.n = n = 50
        self.euclidean = Euclidean(m, n)
        self.sphere = Sphere(n)
        self.man = Product([self.euclidean, self.sphere])

    def test_dim(self):
        np_testing.assert_equal(self.man.dim, self.m * self.n + self.n - 1)

    def test_typicaldist(self):
        np_testing.assert_equal(self.man.typicaldist,
                                np.sqrt((self.m * self.n) + np.pi**2))

    def test_dist(self):
        X = self.man.rand()
        Y = self.man.rand()
        np_testing.assert_equal(
            self.man.dist(X, Y),
            np.sqrt(
                self.euclidean.dist(X[0], Y[0])**2 +
                self.sphere.dist(X[1], Y[1])**2))

    def test_tangent_vector_multiplication(self):
        # Regression test for https://github.com/pymanopt/pymanopt/issues/49.
        man = Product((Euclidean(12), Grassmann(12, 3)))
        x = man.rand()
        eta = man.randvec(x)
        np.float64(1.0) * eta

    # def test_inner(self):

    # def test_proj(self):

    # def test_ehess2rhess(self):

    # def test_retr(self):

    # def test_egrad2rgrad(self):

    # def test_norm(self):

    # def test_rand(self):

    # def test_randvec(self):

    # def test_transp(self):

    def test_exp_log_inverse(self):
        s = self.man
        X = s.rand()
        Y = s.rand()
        Yexplog = s.exp(X, s.log(X, Y))
        np_testing.assert_almost_equal(s.dist(Y, Yexplog), 0)

    def test_log_exp_inverse(self):
        s = self.man
        X = s.rand()
        U = s.randvec(X)
        Ulogexp = s.log(X, s.exp(X, U))
        np_testing.assert_array_almost_equal(U[0], Ulogexp[0])
        np_testing.assert_array_almost_equal(U[1], Ulogexp[1])

    def test_pairmean(self):
        s = self.man
        X = s.rand()
        Y = s.rand()
        Z = s.pairmean(X, Y)
        np_testing.assert_array_almost_equal(s.dist(X, Z), s.dist(Y, Z))
Code example #27
0
if __name__ == "__main__":
    # Generate random data
    X = np.random.randn(3, 100)
    Y = X[0:1, :] - 2 * X[1:2, :] + np.random.randn(1, 100) + 5

    # Cost function is the squared test error
    w = T.matrix()
    b = T.matrix()
    cost = T.sum((Y - w.T.dot(X) - b[0, 0])**2)

    # A solver that involves the hessian
    solver = TrustRegions()

    # R^3 x R^1
    manifold = Product([Euclidean(3, 1), Euclidean(1, 1)])

    # Solve the problem with pymanopt
    problem = Problem(manifold=manifold, cost=cost, arg=[w, b], verbosity=0)
    wopt = solver.solve(problem)

    print('Weights found by pymanopt (top) / closed form solution (bottom)')

    print(wopt[0].T)
    print(wopt[1])

    X1 = np.concatenate((X, np.ones((1, 100))), axis=0)
    wclosed = np.linalg.inv(X1.dot(X1.T)).dot(X1).dot(Y.T)
    print(wclosed[0:3].T)
    print(wclosed[3])
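Theano is no longer maintained; the same toy regression can be expressed against the autograd backend that other examples in this collection use. A sketch assuming the pymanopt 2.x decorator and optimizer API:

import autograd.numpy as anp
import numpy as np
import pymanopt
from pymanopt.manifolds import Euclidean, Product
from pymanopt.optimizers import TrustRegions

X = np.random.randn(3, 100)
Y = X[0:1, :] - 2 * X[1:2, :] + np.random.randn(1, 100) + 5

manifold = Product([Euclidean(3, 1), Euclidean(1, 1)])  # R^3 x R^1

@pymanopt.function.autograd(manifold)
def cost(w, b):
    # squared test error, as in the Theano version above
    return anp.sum((Y - anp.dot(w.T, X) - b[0, 0]) ** 2)

problem = pymanopt.Problem(manifold, cost)
wopt = TrustRegions(verbosity=0).run(problem).point
print(wopt[0].T, wopt[1])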
Code example #28
0
def shannon_rate(L, T):
    # NOTE: the opening of this function -- the lines that derive Sigma_norm_B,
    # lyap_tmp, Wo, sigma2, I and n from the factor L -- is truncated in this
    # excerpt; only the tail of the computation survives below.
    Sigma_norm_vec = np.reshape(Sigma_norm_B, (n*n, 1))
    Wcd_vec = np.dot(np.linalg.inv(lyap_tmp), Sigma_norm_vec)
    Wcd = np.reshape(Wcd_vec, (n, n))
    Wnum = np.dot(Wo, Wcd)
    Wden = np.dot(Wo, Wcd - Sigma_norm_B)
    num = np.linalg.det(Wnum + sigma2*I)
    den = np.linalg.det(Wden + sigma2*I)
    return -np.log2(num/den)/(2.*T)
     
def make_shannon_capacity_fact_fixed_time(T):
    def shannon_capacity_fact_fixed_time(L):
        return shannon_rate(L, T)
    return shannon_capacity_fact_fixed_time

# instantiate manifold for Pymanopt
manifold_fact = Euclidean(n, n)
# instantiate solver for Pymanopt
solver = TrustRegions()


#### find R_max via bisection method ####

# min value T
T1 = 1e-8
shannon_capacity_fact_fixed_time = make_shannon_capacity_fact_fixed_time(T1)
problem = Problem(manifold=manifold_fact, cost=shannon_capacity_fact_fixed_time, verbosity=0)
L_opt1 = solver.solve(problem)
p_1 = -shannon_capacity_fact_fixed_time(L_opt1)
# print(p_1)

Sigma_opt1 = np.dot(L_opt1, L_opt1.transpose())
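The excerpt stops right after evaluating the capacity at the smallest coding time T1, before the advertised bisection. A generic sketch of how such a bisection could be wired on top of the helpers above; capacity_at and bisect_time are illustrative names, and the monotonicity assumption and search interval are not taken from the original file:

def capacity_at(T):
    # re-solve the factor problem at coding time T and return the capacity
    cost_T = make_shannon_capacity_fact_fixed_time(T)
    problem_T = Problem(manifold=manifold_fact, cost=cost_T, verbosity=0)
    return -cost_T(solver.solve(problem_T))

def bisect_time(target_rate, T_lo=1e-8, T_hi=1e2, tol=1e-6):
    # assumes capacity_at is monotonically decreasing in T on [T_lo, T_hi]
    while T_hi - T_lo > tol:
        T_mid = 0.5 * (T_lo + T_hi)
        if capacity_at(T_mid) > target_rate:
            T_lo = T_mid  # still feasible; push the time out further
        else:
            T_hi = T_mid
    return 0.5 * (T_lo + T_hi)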
Code example #30
0
File: test_solver_sgd.py Project: ronakrm/pymanopt
    # Set up Estimate
    W1_hatA = tf.Variable(tf.ones([dx,dx]))
    W2_hatA = tf.Variable(tf.ones([dx,dy]))

    # Cost function is the squared test error
    W_hatA = tf.matmul(W1_hatA, W2_hatA)
    Y_hatA = tf.matmul(X, W_hatA)
    costA = tf.reduce_mean(tf.square(Y - Y_hatA))


    # first-order stochastic solver (from the ronakrm/pymanopt fork)
    solver = StochasticGradient(stepsize=lr, logverbosity=2)
    
    # Product Manifold
    manifold = Product(( Euclidean(dx, dx), Euclidean(dx, dy) ))

    # Solve the problem with pymanopt (cost, W1_hat and W2_hat are the
    # pymanopt-side graph variables defined in a part of the file not
    # shown in this excerpt)
    problem = Problem(manifold=manifold, cost=cost, arg=[W1_hat, W2_hat], verbosity=2)

    # tf standard GD
    tf_gd_solver = tf.train.GradientDescentOptimizer(learning_rate=lr).minimize(costA)


    niters = 10
    wopt = [np.ones([dx,dx]), np.ones([dx,dy])] # initial point
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    print('SGD-PYMANOPT\tSGD-STANDARDTF')
    for i in range(niters):
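        # NOTE: the body of this loop is truncated in the excerpt; the lines
        # below are a hypothetical completion of the TF-1.x half of the
        # comparison (the pymanopt StochasticGradient half is omitted, since
        # the fork's step-wise API is not shown here).
        sess.run(tf_gd_solver)  # one standard gradient-descent step on costA
        print('\t\t{:.6f}'.format(sess.run(costA)))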