def test_convergence(self):
        """ Test that we converge correctly. """
        sigma = 0.01
        for i in range(N_IT):
            self.set_measurements(seed=i)

            D_noisy = add_noise(self.D_topright, noise_sigma=sigma)
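            # the ground-truth coefficients serve as the initial guess; the LM
            # refinement below must not end with a higher cost than this baseline.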
            x0 = self.traj.coeffs.reshape((-1, ))
            cost0 = cost_function(x0, D_noisy, self.anchors, self.basis)

            xhat = least_squares_lm(D_noisy,
                                    self.anchors,
                                    self.basis,
                                    x0=x0,
                                    verbose=VERBOSE)
            xhat = xhat.reshape((-1, ))
            costhat = cost_function(xhat, D_noisy, self.anchors, self.basis)
            self.assertLessEqual(np.sum(costhat**2), np.sum(cost0**2))
            try:
                cost_around_local_minimum(xhat, D_noisy, self.anchors,
                                          self.basis)
            except Exception as e:
                print(f'test_convergence failed at seed {i}')
                print('Error message:', e)

    def test_combination(self):
        """ Test that if we do our method first
        and then apply LM with split method, we do not
        change the result.
        """
        sigma = 0.01
        tol = 1e-3  # found empirically.
        for i in range(3):  # works up to 10, but quite slow.
            self.set_measurements(seed=i)
            D_noisy = add_noise(self.D_topright, noise_sigma=sigma)

            x0 = trajectory_recovery(D_noisy,
                                     self.anchors,
                                     self.basis,
                                     weighted=False)
            x1 = least_squares_lm(D_noisy,
                                  self.anchors,
                                  self.basis,
                                  x0=x0.reshape((-1, )),
                                  cost='split',
                                  jacobian=False,
                                  verbose=VERBOSE)
            assert np.linalg.norm(
                x0 - x1) < tol, f'for {i}: {np.linalg.norm(x0 - x1)}'


def run_simulation(parameters, outfolder=None, solver=None, verbose=False):
    """ Run simulation.

    :param parameters: Either the name of the folder where parameters.json is stored,
        or a dict of parameters.
    :param outfolder: Folder to save parameters.json and the results in. If None,
        the results are returned instead of being saved.
    :param solver: One of None (defaults to semidef_relaxation_noiseless),
        'trajectory_recovery', or 'weighted_trajectory_recovery'.
    :param verbose: Print progress for every measurement and noise combination.
    """
    if isinstance(parameters, str):
        fname = parameters + 'parameters.json'
        parameters = read_params(fname)
        print('read parameters from file {}.'.format(fname))

    elif isinstance(parameters, dict):

        # if we are reusing a directory that already exists, make sure that the
        # parameters saved there match the new ones (up to the timestamp).
        if outfolder is not None:
            try:
                parameters_old = read_params(outfolder + 'parameters.json')
                parameters['time'] = parameters_old['time']
                assert parameters == parameters_old, 'found conflicting parameters file: {}'.format(outfolder +
                                                                                                    'parameters.json')
            except FileNotFoundError:
                print('no conflicting parameters file found.')
    else:
        raise TypeError('parameters needs to be folder name or dictionary.')

    parameters.setdefault('noise_to_square', False)
    parameters.setdefault('measure_distances', False)
    parameters.setdefault('sampling_strategy', 'uniform')

    complexities = parameters['complexities']
    anchors = parameters['anchors']
    positions = parameters['positions']
    n_its = parameters['n_its']
    noise_sigmas = parameters['noise_sigmas']
    success_thresholds = parameters['success_thresholds']
    assert len(success_thresholds) == len(noise_sigmas), 'need one success threshold per noise level.'

    if parameters['sampling_strategy'] == 'single_time':
        max_measurements = max(positions)
    else:
        max_measurements = max(positions) * max(anchors)

    # result tensors start as NaN ("never visited"); cells are zeroed once their
    # configuration is simulated, so missing combinations stay distinguishable.
    successes = np.full((len(complexities), len(anchors), len(positions), len(noise_sigmas), max_measurements), np.nan)
    errors = np.full(successes.shape, np.nan)
    relative_errors = np.full(successes.shape, np.nan)
    absolute_errors = np.full(successes.shape, np.nan)
    num_not_solved = np.full(successes.shape, np.nan)
    num_not_accurate = np.full(successes.shape, np.nan)
    squared_distances = []

    for c_idx, n_complexity in enumerate(complexities):
        print('n_complexity', n_complexity)

        for a_idx, n_anchors in enumerate(anchors):
            print('n_anchors', n_anchors)

            for p_idx, n_positions in enumerate(positions):
                print('n_positions', n_positions)

                if parameters['sampling_strategy'] == 'single_time':
                    n_measurements = n_positions
                else:
                    n_measurements = n_positions * n_anchors
                for m_idx, n_missing in enumerate(range(n_measurements)):
                    if verbose:
                        print('measurements idx', m_idx)

                    for noise_idx, noise_sigma in enumerate(noise_sigmas):
                        indexes = np.s_[c_idx, a_idx, p_idx, noise_idx, m_idx]
                        if verbose:
                            print("noise", noise_sigma)

                        # replace NaN with 0 to mark this cell as visited.
                        if np.isnan(successes[indexes]):
                            successes[indexes] = 0.0
                        if np.isnan(num_not_solved[indexes]):
                            num_not_solved[indexes] = 0.0
                        if np.isnan(num_not_accurate[indexes]):
                            num_not_accurate[indexes] = 0.0

                        for _ in range(n_its):

                            trajectory = Trajectory(n_complexity, dim=DIM)
                            anchors_coord = create_anchors(DIM, n_anchors)
                            trajectory.set_coeffs(seed=None)

                            basis, D_topright = get_measurements(trajectory, anchors_coord, n_samples=n_positions)
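                            # D_topright holds squared distances; keep their noiseless
                            # square roots for the error metrics computed below.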
                            distances = np.sqrt(D_topright)
                            D_topright = add_noise(D_topright, noise_sigma, parameters["noise_to_square"])
                            mask = create_mask(n_positions,
                                               n_anchors,
                                               strategy=parameters['sampling_strategy'],
                                               n_missing=n_missing)
                            if parameters['measure_distances']:
                                squared_distances.extend(D_topright.flatten().tolist())
                            D_topright = np.multiply(D_topright, mask)
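                            # recovery is only attempted if the sorted per-anchor
                            # measurement counts satisfy the full-rank condition.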

                            try:
                                assert p.full_rank_condition(
                                    np.sort(np.sum(mask, axis=0))[::-1], DIM + 1, n_complexity), "insufficient rank"
                                if (solver is None) or (solver == "semidef_relaxation_noiseless"):
                                    X = semidef_relaxation_noiseless(D_topright,
                                                                     anchors_coord,
                                                                     basis,
                                                                     chosen_solver=cvxpy.CVXOPT)
                                    P_hat = X[:DIM, DIM:]
                                elif solver == 'trajectory_recovery':
                                    P_hat = trajectory_recovery(D_topright, anchors_coord, basis)
                                elif solver == 'weighted_trajectory_recovery':
                                    P_hat = trajectory_recovery(D_topright, anchors_coord, basis, weighted=True)
                                else:
                                    raise ValueError('unknown solver: {}'.format(solver))

                                # calculate reconstruction error with respect to distances
                                trajectory_estimated = Trajectory(coeffs=P_hat)
                                _, D_estimated = get_measurements(trajectory_estimated,
                                                                  anchors_coord,
                                                                  n_samples=n_positions)
                                estimated_distances = np.sqrt(D_estimated)

                                robust_add(errors, indexes, np.linalg.norm(P_hat - trajectory.coeffs))
                                robust_add(relative_errors, indexes,
                                           np.linalg.norm((distances - estimated_distances) / (distances + 1e-10)))
                                robust_add(absolute_errors, indexes, np.linalg.norm(distances - estimated_distances))

                                assert np.linalg.norm(P_hat - trajectory.coeffs) <= success_thresholds[noise_idx]

                                robust_increment(successes, indexes)

                            except (cvxpy.SolverError, ZeroDivisionError):
                                logging.info("could not solve n_positions={}, n_missing={}".format(
                                    n_positions, n_missing))
                                robust_increment(num_not_solved, indexes)

                            except np.linalg.LinAlgError:
                                robust_increment(num_not_solved, indexes)

                            except AssertionError as e:
                                if str(e) == "insufficient rank":
                                    robust_increment(num_not_solved, indexes)
                                else:
                                    logging.info("result not accurate n_positions={}, n_missing={}".format(
                                        n_positions, n_missing))
                                    robust_increment(num_not_accurate, indexes)

                        # average the accumulated errors over the runs that were solved.
                        n_solved = n_its - num_not_solved[indexes]
                        if n_solved > 0:
                            errors[indexes] /= n_solved
                            relative_errors[indexes] /= n_solved

    results = {
        'successes': successes,
        'num-not-solved': num_not_solved,
        'num-not-accurate': num_not_accurate,
        'errors': errors,
        'relative-errors': relative_errors,
        'absolute-errors': absolute_errors,
        'distances': squared_distances,  # squared distances, collected when measure_distances is set.
    }

    if outfolder is not None:
        print('Done with simulation. Saving results...')

        parameters['time'] = time.time()

        if not os.path.exists(outfolder):
            os.makedirs(outfolder)

        save_params(outfolder + 'parameters.json', **parameters)
        save_results(outfolder + 'result_{}_{}', results)
    else:
        return results
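

# Minimal invocation sketch (hypothetical values; the keys mirror those consumed
# inside run_simulation, and 'trajectory_recovery' is one of the accepted solvers):
#
#     params = {
#         'complexities': [3],
#         'anchors': [4],
#         'positions': [10],
#         'n_its': 2,
#         'noise_sigmas': [0.1],
#         'success_thresholds': [1.0],
#     }
#     results = run_simulation(params, solver='trajectory_recovery')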

    def test_cost_jacobian(self):
        """ Test with finite differences that Jacobian is correct."""
        i = 1
        self.set_measurements(seed=i)

        # TODO(FD):
        # We make sigma very small to test whether the cost function
        # behaves well at least around the optimum.
        # It is not clear why it does not behave well elsewhere.
        sigma = 1e-10

        D_noisy = add_noise(self.D_topright, noise_sigma=sigma)

        C_k_vec = self.traj.coeffs.reshape((-1, ))
        jacobian = cost_jacobian(C_k_vec, D_noisy, self.anchors, self.basis)

        cost = cost_function(C_k_vec,
                             D_noisy,
                             self.anchors,
                             self.basis,
                             squared=True)
        N = len(cost)
        Kd = len(C_k_vec)

        # sweep delta from large to small: too large a step biases the forward
        # difference, too small a step loses precision to floating-point cancellation.
        deltas = list(np.logspace(-15, -1, 10))[::-1]
        previous_jac = 1000  # scalar sentinel, broadcasts against the first estimate.
        convergence_lim = 1e-5

        for delta in deltas:
            jacobian_est = np.empty((N, Kd))
            for k in range(Kd):
                C_k_delta = C_k_vec.copy()
                C_k_delta[k] += delta
                cost_delta = cost_function(C_k_delta,
                                           D_noisy,
                                           self.anchors,
                                           self.basis,
                                           squared=True)
                jacobian_est[:, k] = (cost_delta - cost) / delta

            new_jac = jacobian_est
            difference = np.sum(np.abs(previous_jac - new_jac))
            if np.sum(np.abs(new_jac)) < EPS:
                print('new jacobian is all zero! use previous jacobian.')
                break
            elif difference < convergence_lim:
                print(f'Jacobian converged at delta={delta}.')
                previous_jac = new_jac
                break
            else:  # not converged yet.
                previous_jac = new_jac
        jacobian_est = previous_jac
        print('===== first element =====:')
        print(
            f'jacobian est vs. real: {jacobian_est[0, 0]:.4e}, {jacobian[0, 0]:.4e}'
        )
        print(f'difference: {jacobian_est[0, 0] - jacobian[0, 0]:.4e}')
        print('==== total difference ===:')
        print(np.sum(np.abs(jacobian_est - jacobian)))
        self.assertLessEqual(np.sum(np.abs(jacobian_est - jacobian)), 1e-4)
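
    # The sweep above is tailored to cost_function; the same check can be written
    # generically. A minimal sketch (standalone helper, not part of this suite;
    # the name numerical_jacobian and the fixed delta are illustrative only):
    #
    #     def numerical_jacobian(f, x, delta=1e-6):
    #         """Forward-difference Jacobian of f: R^n -> R^m at x."""
    #         fx = np.asarray(f(x))
    #         jac = np.empty((fx.size, x.size))
    #         for k in range(x.size):
    #             x_d = x.copy()
    #             x_d[k] += delta
    #             jac[:, k] = (np.asarray(f(x_d)) - fx) / delta
    #         return jac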