Example #1
import numpy as np


def cost_around_local_minimum(x0, *args):
    """Check that x0 is a local minimum: perturbing each coordinate by a
    small delta must not decrease the cost (beyond numerical precision).

    cost_function is assumed to be defined in the same module and to
    return a vector of residuals.
    """
    delta = 1e-10
    res_minimum = cost_function(x0, *args)
    cost_minimum = 0.5 * np.sum(res_minimum**2)
    for i in range(len(x0)):
        x0_delta = x0.copy()
        x0_delta[i] += delta
        res_delta = cost_function(x0_delta, *args)
        cost_delta = 0.5 * np.sum(res_delta**2)
        difference = cost_delta - cost_minimum
        if difference < 0:
            assert abs(
                difference
            ) < delta, f'difference negative and too big: {abs(difference):.2e} > {delta:.0e}'
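The helper relies on a cost_function that returns a vector of residuals. The following self-contained sketch shows one way to exercise it; the cost_function below is an illustrative stand-in with the same calling convention, not the project's implementation:

import numpy as np

def cost_function(C_vec, D, anchors, basis):
    # Illustrative stand-in (assumption): residuals between measured and
    # modeled squared anchor-to-trajectory distances.
    dim = anchors.shape[0]
    C = C_vec.reshape((dim, -1))                      # (dim, K) coefficients
    points = C @ basis                                # (dim, n_times) samples
    diff = anchors[:, :, None] - points[:, None, :]   # (dim, n_anchors, n_times)
    D_model = np.sum(diff**2, axis=0)                 # (n_anchors, n_times)
    return (D - D_model)[D > 0]                       # measured entries only

rng = np.random.default_rng(0)
anchors = rng.normal(size=(2, 4))
basis = rng.normal(size=(3, 10))
C = rng.normal(size=(2, 3))
D = np.sum((anchors[:, :, None] - (C @ basis)[:, None, :])**2, axis=0)

# C is an exact minimum (zero residuals), so the check passes silently.
cost_around_local_minimum(C.reshape((-1, )), D, anchors, basis)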
Example #2
    def test_cost_function(self):
        """ Test that cost for noiseless is indeed zero. """
        for i in range(N_IT):
            self.set_measurements(seed=i)

            C_k_vec = self.traj.coeffs.reshape((-1, ))
            cost = cost_function(C_k_vec, self.D_topright, self.anchors,
                                 self.basis)
            self.assertTrue(np.all(cost < EPS))

            cost_around_local_minimum(C_k_vec, self.D_topright, self.anchors,
                                      self.basis)
Example #3
    def test_least_squares_lm(self):
        mask = create_mask(*self.D_gt.shape, strategy='single_time')
        D_sparse = self.D_gt * mask

        C_gt_vec = self.traj.coeffs.reshape((-1, ))
        cost_gt = cost_function(C_gt_vec, D_sparse, self.anchors[:2, :],
                                self.basis)
        self.assertTrue(np.sum(np.abs(cost_gt)) < eps)

        x0 = self.traj.coeffs.copy().reshape((-1, ))
        Cref = least_squares_lm(D_sparse, self.anchors, self.basis, x0)
        self.assertLess(error_measure(Cref, self.traj.coeffs), eps)
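create_mask is a project helper whose 'single_time' strategy is not shown here; presumably it keeps one anchor measurement per time instant. A hypothetical sketch of such a mask (both the real function's behavior and the orientation of D's axes are assumptions):

import numpy as np

def single_time_mask(n_rows, n_cols, seed=None):
    # Hypothetical: one nonzero entry per column, assuming rows index
    # anchors and columns index time instants.
    rng = np.random.default_rng(seed)
    mask = np.zeros((n_rows, n_cols))
    mask[rng.integers(n_rows, size=n_cols), np.arange(n_cols)] = 1.0
    return mask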
Example #4
    def test_convergence(self):
        """ Test that we converge correctly. """
        sigma = 0.01
        for i in range(N_IT):
            self.set_measurements(seed=i)

            D_noisy = add_noise(self.D_topright, noise_sigma=sigma)
            x0 = self.traj.coeffs.reshape((-1, ))
            cost0 = cost_function(x0, D_noisy, self.anchors, self.basis)

            xhat = least_squares_lm(D_noisy,
                                    self.anchors,
                                    self.basis,
                                    x0=x0,
                                    verbose=VERBOSE)
            xhat = xhat.reshape((-1, ))
            costhat = cost_function(xhat, D_noisy, self.anchors, self.basis)
            self.assertLessEqual(np.sum(costhat**2), np.sum(cost0**2))
            # Non-fatal sanity check: a violated local-minimum condition is
            # reported but does not fail the test.
            try:
                cost_around_local_minimum(xhat, D_noisy, self.anchors,
                                          self.basis)
            except Exception as e:
                print(f'test_convergence failed at seed {i}')
                print('Error message:', e)
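add_noise is also a project function; a minimal sketch of what it might do, assuming Gaussian noise is added only to the measured (nonzero) entries so the sparsity pattern of D is preserved:

import numpy as np

def add_noise(D, noise_sigma, seed=None):
    # Sketch (assumption): perturb only entries where D > 0.
    rng = np.random.default_rng(seed)
    D_noisy = D.copy()
    mask = D > 0
    D_noisy[mask] += rng.normal(scale=noise_sigma, size=int(mask.sum()))
    return D_noisy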
Example #5
def generate_results(traj,
                     D_small,
                     times_small,
                     anchors,
                     points_small,
                     methods=METHODS,
                     n_it=0):
    n_complexity = traj.n_complexity
    n_measurements = np.sum(D_small > 0)
    current_results = pd.DataFrame(columns=[
        'n_it', 'n_complexity', 'n_measurements', 'mae', 'mse', 'method',
        'plotting', 'cost_rls', 'cost_srls'
    ])

    basis_small = traj.get_basis(times=times_small)

    for method in methods:
        C_hat, p_hat, lat_idx = apply_algorithm(traj,
                                                D_small,
                                                times_small,
                                                anchors,
                                                method=method)
        plotting = (C_hat, p_hat)
        mae = mse = cost_rls = cost_srls = None
        if C_hat is not None:
            traj.set_coeffs(coeffs=C_hat)
            p_fitted = traj.get_sampling_points(times=times_small).T
            mae = error_measure(p_fitted, points_small, 'mae')
            mse = error_measure(p_fitted, points_small, 'mse')
            cost_rls = np.sum(
                cost_function(C_hat.reshape((-1, )),
                              D_small,
                              anchors,
                              basis_small,
                              squared=False))
            cost_srls = np.sum(
                cost_function(C_hat.reshape((-1, )),
                              D_small,
                              anchors,
                              basis_small,
                              squared=True))
        current_results.loc[len(current_results)] = dict(
            plotting=plotting,
            n_complexity=n_complexity,
            n_measurements=n_measurements,
            method=method,
            n_it=n_it,
            mae=mae,
            mse=mse,
            cost_rls=cost_rls,
            cost_srls=cost_srls)

        # do raw version if applicable
        if method in ['rls', 'srls']:
            points_small_lat = points_small[lat_idx]
            mae = error_measure(p_hat, points_small_lat, 'mae')
            mse = error_measure(p_hat, points_small_lat, 'mse')
            current_results.loc[len(current_results)] = dict(
                plotting=(None, None),
                n_complexity=n_complexity,
                n_measurements=n_measurements,
                method=method + ' raw',
                n_it=n_it,
                mae=mae,
                mse=mse,
                cost_rls=cost_rls,
                cost_srls=cost_srls)
    return current_results
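Since generate_results returns one DataFrame per call, results over several runs can be stacked and aggregated. A usage sketch, assuming the setup objects (traj, D_small, times_small, anchors, points_small) already exist:

import pandas as pd

results = pd.concat(
    [generate_results(traj, D_small, times_small, anchors, points_small, n_it=it)
     for it in range(10)],
    ignore_index=True)

# mae/mse are None when a method returns no coefficients; cast so that
# None becomes NaN and is ignored by mean().
summary = (results[['method', 'mae', 'mse']]
           .astype({'mae': float, 'mse': float})
           .groupby('method')
           .mean())
print(summary)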
Example #6
    def test_cost_jacobian(self):
        """ Test with finite differences that Jacobian is correct."""
        i = 1
        self.set_measurements(seed=i)

        # TODO(FD):
        # We make sigma very small to test if the cost function
        # behaves well at least around the optimum.
        # It is not clear why it does not behave well elsewhere.
        sigma = 1e-10

        D_noisy = add_noise(self.D_topright, noise_sigma=sigma)

        C_k_vec = self.traj.coeffs.reshape((-1, ))
        jacobian = cost_jacobian(C_k_vec, D_noisy, self.anchors, self.basis)

        cost = cost_function(C_k_vec,
                             D_noisy,
                             self.anchors,
                             self.basis,
                             squared=True)
        N = len(cost)
        Kd = len(C_k_vec)

        # Sweep delta from coarse (1e-1) to fine (1e-15) and stop once the
        # Jacobian estimate converges.
        deltas = list(np.logspace(-15, -1, 10))[::-1]
        previous_jac = 1000  # sentinel so the first difference is large
        convergence_lim = 1e-5

        for delta in deltas:
            jacobian_est = np.empty((N, Kd))
            for k in range(Kd):
                C_k_delta = C_k_vec.copy()
                C_k_delta[k] += delta
                cost_delta = cost_function(C_k_delta,
                                           D_noisy,
                                           self.anchors,
                                           self.basis,
                                           squared=True)
                jacobian_est[:, k] = (cost_delta - cost) / delta

            new_jac = jacobian_est
            difference = np.sum(np.abs(previous_jac - new_jac))
            if np.sum(np.abs(new_jac)) < EPS:
                print('new jacobian is all zero! use previous jacobian.')
                break
            elif difference < convergence_lim:
                print(f'Jacobian converged at delta={delta}.')
                previous_jac = new_jac
                break
            else:  # not converged yet.
                previous_jac = new_jac
        jacobian_est = previous_jac
        print('===== first element =====:')
        print(
            f'jacobian est vs. real: {jacobian_est[0, 0]:.4e}, {jacobian[0, 0]:.4e}'
        )
        print(f'difference: {jacobian_est[0, 0] - jacobian[0, 0]:.4e}')
        print('==== total difference ===:')
        print(np.sum(np.abs(jacobian_est - jacobian)))
        self.assertLessEqual(np.sum(np.abs(jacobian_est - jacobian)), 1e-4)
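The inner loop above is a generic forward-difference Jacobian estimate. Factored out as a standalone helper (a sketch, not project code), it reads:

import numpy as np

def finite_difference_jacobian(f, x, delta=1e-6):
    # Forward differences: J[:, k] ~ (f(x + delta * e_k) - f(x)) / delta.
    f0 = np.asarray(f(x))
    jac = np.empty((f0.size, x.size))
    for k in range(x.size):
        x_delta = x.copy()
        x_delta[k] += delta
        jac[:, k] = (np.asarray(f(x_delta)) - f0) / delta
    return jac

# Hypothetical wiring against the cost function under test:
# f = lambda C: cost_function(C, D_noisy, anchors, basis, squared=True)
# jacobian_est = finite_difference_jacobian(f, C_k_vec)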