def run(self, evaluator, initial_parameters, target, result=Result()):
        """Runs this optimizer.

        :param evaluator: the evaluator to use for each Evaluation needed
        :type evaluator: Evaluator
        :param initial_parameters: The initial parameters to start the optimization from
        :type initial_parameters: numpy array
        :param target: The target of the calibration
        :type target: Evaluation
        :param result: Results object to write metadata and iterations to, defaults to Result()
        :type result: Result, optional
        """
        pass
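    # A minimal usage sketch of this interface (hypothetical names; the
    # concrete Optimizer subclass, the Evaluator and the target Evaluation
    # are assumed to be constructed elsewhere in the codebase):
    #
    #   optimizer = GaussNewtonOptimizer(parametermanager)
    #   result = optimizer.run(evaluator, initial_parameters, target)
    #   print(result.iterationCount)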
    def openFile(self):
        resultfilename = QFileDialog.getOpenFileName(
            self.main_window,
            "Select result file",
            filter="Result files (*.pkl)")[0]
        # getOpenFileName returns an empty string if the dialog was cancelled
        if not resultfilename:
            return
        self.result = Result.load(resultfilename)
        self.status_openfile.setText(resultfilename)
        self.status_data.setText("Iterations: " +
                                 str(self.result.iterationCount))

        fields = []
        for p in self.result.metadata["parametermanager"].parameters:
            fields.append((p.name, {}))

        fields.append(("norm", {}))
        self.canvas.setFields(fields)

        self.index = 0
        self.plotNext()
    def run(self, evaluator, initial_parameters, target, result=Result()):

        evaluator.resultobj = result

        result.addRunMetadata("target", target)
        result.addRunMetadata("optimizertype", type(self).__name__)
        result.addRunMetadata("fixedparameters", evaluator.fixedparameters)
        result.addRunMetadata("parametermanager", self.parametermanager)

        result.log("-- Starting bayesian optimization. --")

        targetdata = target.getNumpyArray()

        dimensions = []

        for p in self.parametermanager.parameters:

            if p.maximumValue is None:
                print("No maximum value for parameter " + p.name +
                      " provided!")
                exit(1)

            if p.minimumValue is None:
                print("No minimum value for parameter " + p.name +
                      " provided!")
                exit(1)

            print(p.name, p.minimumValue, p.maximumValue,
                  p.optimizationSpaceLowerBound, p.optimizationSpaceUpperBound)
            dimensions.append(
                skopt.space.Real(p.optimizationSpaceLowerBound,
                                 p.optimizationSpaceUpperBound))

        bayes_optimizer = skopt.Optimizer(dimensions,
                                          base_estimator="GP",
                                          n_initial_points=2,
                                          acq_func="EI",
                                          random_state=1)
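        # skopt uses an ask/tell interface: ask() proposes the next batch of
        # candidate points from the surrogate model, the points are evaluated
        # externally, and tell() feeds the observed costs back to refit it.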

        for iteration in range(self.max_iterations):

            needed_evaluations = bayes_optimizer.ask(evaluator.parallelism)

            result_evaluations = evaluator.evaluate(needed_evaluations,
                                                    "bayes-opt")

            # check for failed evaluations before converting; errored
            # evaluations carry no measurement data
            for ev in result_evaluations:
                if ev is None or isinstance(ev, ErroredEvaluation):
                    result.log(
                        "Got a failed evaluation: UG run did not finish or a parameter was out of bounds."
                    )
                    result.log(evaluator.getStatistics())
                    return

            results = self.measurementToNumpyArrayConverter(
                result_evaluations, target)

            Y = []

            min_S = None
            min_Index = None

            for i in range(len(results)):

                residual = results[i] - targetdata
                S = 0.5 * residual.dot(residual)

                if min_S is None or min_S > S:
                    min_S = S
                    min_Index = i

                Y.append(S)

            print(Y)

            bayes_optimizer.tell(needed_evaluations, Y)

            result.addMetric("residualnorm", min_S)
            result.addMetric("parameters", needed_evaluations[min_Index])
            result.addMetric("measurement", results[min_Index])
            result.addMetric("measurementEvaluation",
                             result_evaluations[min_Index])

            result.log("[" + str(iteration) + "]: best_param=" +
                       str(needed_evaluations[min_Index]) +
                       ", residual norm S=" + str(min_S))

            result.commitIteration()

        result.addRunMetadata(
            "skopt-res",
            skopt.utils.create_result(bayes_optimizer.Xi,
                                      bayes_optimizer.yi,
                                      space=bayes_optimizer.space,
                                      models=bayes_optimizer.models))
        result.save()

        if (iteration == self.max_iterations - 1):
            result.log("-- Bayesian optimization did not converge. --")

        result.log(evaluator.getStatistics())
        return result
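
# A self-contained toy version of the ask/tell loop above, using a simple
# quadratic in place of a UG evaluation (assumes scikit-optimize is
# installed; all names here are illustrative, not part of this codebase):
import skopt

toy_opt = skopt.Optimizer([skopt.space.Real(-2.0, 2.0)],
                          base_estimator="GP",
                          n_initial_points=2,
                          acq_func="EI",
                          random_state=1)

for _ in range(10):
    xs = toy_opt.ask(n_points=2)         # propose a batch of candidates
    ys = [(x[0] - 1.0)**2 for x in xs]   # evaluate the cost externally
    toy_opt.tell(xs, ys)                 # update the surrogate model

print(min(toy_opt.yi))                   # best observed cost, near 0.0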
    def run(self, evaluator, initial_parameters, target, result=Result()):

        evaluator.resultobj = result

        result.addRunMetadata("target", target)
        result.addRunMetadata("optimizertype", type(self).__name__)
        result.addRunMetadata("epsilon", self.finite_differencing_epsilon)
        result.addRunMetadata("differencing", self.differencing.value)
        result.addRunMetadata("fixedparameters",
                              self.evaluator.fixedparameters)
        result.addRunMetadata("parametermanager",
                              self.evaluator.parametermanager)

        result.log("-- Starting newton method. --")

        targetdata = target.getNumpyArray()

        first_S = -1

        x = initial_parameters
        n = len(x)

        U = x + (self.maximum - self.minimum)
        L = x - (self.maximum - self.minimum)

        for iteration in range(self.max_iterations):

            print("x=" + str(x))
            print("U=" + str(U))
            print("L=" + str(L))

            jacobi_result = self.getJacobiMatrix(x, evaluator, target, result)
            if jacobi_result is None:
                result.log(
                    "Error calculating Jacobi matrix, UG run did not finish")
                return

            J, measurement_evaluation = jacobi_result
            measurement = measurement_evaluation.getNumpyArrayLike(target)

            residual = measurement - targetdata
            S = np.linalg.norm(residual)

            # save the residualnorm S for calculation of the relative reduction
            if first_S == -1:
                first_S = S

            # calculate the gradient at the current point:
            # since f = 1/2 * sum(r^2), the gradient is grad = J^T r
            grad = J.transpose().dot(residual)
            print("grad=" + str(grad))

            result.addMetric("residuals", residual)
            result.addMetric("residualnorm", S)
            result.addMetric("parameters", x)
            result.addMetric("jacobian", J)
            result.addMetric("measurement", measurement)
            result.addMetric("measurementEvaluation", measurement_evaluation)

            result.log("\t [" + str(iteration) + "]: Residual norm S=" +
                       str(S))

            p = np.zeros_like(x)
            q = np.zeros_like(x)
            r = residual
            next_x = np.zeros_like(x)
            alpha = np.zeros_like(x)
            beta = np.zeros_like(x)

            # derivative of the separable subproblem term
            # p_j/(U_j - x) + q_j/(x - L_j) with respect to x
            def l_deriv(arg, j):
                return (p[j] / np.square(U[j] - arg) -
                        q[j] / np.square(arg - L[j]))

            for i in range(n):
                if grad[i] > 0:
                    p[i] = np.square(U[i] - x[i]) * grad[i]
                elif grad[i] < 0:
                    q[i] = -np.square(x[i] - L[i]) * grad[i]

                r -= p[i] / (U[i] - x[i])
                r -= q[i] / (x[i] - L[i])

                alpha[i] = max(self.minimum[i], 0.9 * L[i] + 0.1 * x[i])
                beta[i] = min(self.maximum[i], 0.9 * U[i] + 0.1 * x[i])

                if l_deriv(alpha[i], i) >= 0:
                    next_x[i] = alpha[i]
                elif l_deriv(beta[i], i) <= 0:
                    next_x[i] = beta[i]
                elif l_deriv(alpha[i], i) < 0 and l_deriv(beta[i], i) > 0:
                    # interior stationary point of p/(U-x) + q/(x-L):
                    # x* = (sqrt(p)*L + sqrt(q)*U) / (sqrt(p) + sqrt(q))
                    next_x[i] = (np.sqrt(p[i]) * L[i] + np.sqrt(q[i]) *
                                 U[i]) / (np.sqrt(p[i]) + np.sqrt(q[i]))
                else:
                    next_x[i] = x[i]
                    print("l_deriv strange")

            if (S / first_S < self.minreduction):
                result.log("-- MMA converged. --")
                result.commitIteration()
                break

            result.commitIteration()

            last_S = S
            x = next_x

        else:
            result.log("-- MMA did not converge. --")

        return result
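
# A quick numerical check of the subproblem minimizer used above: for
# min over (L, U) of p/(U - x) + q/(x - L), the stationary point is
# x* = (sqrt(p)*L + sqrt(q)*U) / (sqrt(p) + sqrt(q)) (illustrative values):
import numpy as np

p_, q_, L_, U_ = 2.0, 3.0, -1.0, 1.0
x_star = (np.sqrt(p_) * L_ + np.sqrt(q_) * U_) / (np.sqrt(p_) + np.sqrt(q_))
deriv = p_ / (U_ - x_star)**2 - q_ / (x_star - L_)**2
print(abs(deriv) < 1e-12)  # -> True: the derivative vanishes at x*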
    def run(self, evaluator, initial_parameters, target, result=Result()):

        guess = initial_parameters

        evaluator.resultobj = result

        result.addRunMetadata("target", target)
        result.addRunMetadata("optimizertype", type(self).__name__)
        result.addRunMetadata("epsilon", self.finite_differencing_epsilon)
        result.addRunMetadata("differencing", self.differencing.value)
        result.addRunMetadata("lambda_init", self.initial_lam)
        result.addRunMetadata("nu", self.nu)
        result.addRunMetadata("fixedparameters", evaluator.fixedparameters)
        result.addRunMetadata("parametermanager", evaluator.parametermanager)

        result.log("-- Starting Levenberg-Marquardt method. --")

        targetdata = target.getNumpyArray()

        first_S = -1
        lam = self.initial_lam

        for i in range(self.maxiterations):

            jacobi_result = self.getJacobiMatrix(guess, evaluator, target,
                                                 result)
            if jacobi_result is None:
                result.log(
                    "Error calculating Jacobi matrix, UG run did not finish")
                result.log(evaluator.getStatistics())
                result.save()
                return

            V, measurementEvaluation = jacobi_result
            measurement = measurementEvaluation.getNumpyArrayLike(target)

            r = measurement - targetdata

            S = 0.5 * r.dot(r)

            # save the residualnorm S for calculation of the relative reduction
            if first_S == -1:
                first_S = S

            n = len(targetdata)
            p = len(guess)
            dof = n - p

            # calculate s^2 = residual mean square / variance estimate (p.6 Bates/Watts)

            variance = None if dof == 0 else S / dof

            result.addMetric("residuals", r)
            result.addMetric("residualnorm", S)
            result.addMetric("parameters", guess)
            result.addMetric("jacobian", V)
            result.addMetric("variance", variance)
            result.addMetric("measurement", measurement)
            result.addMetric("measurementEvaluation", measurementEvaluation)

            result.log("[" + str(i) + "]: x=" + str(guess) +
                       ", residual norm S=" + str(S) + ", lambda=" + str(lam))

            # cancel the optimization when the reduction of the norm of the residuals is below the threshold
            if (S / first_S < self.minreduction):
                result.log("-- Levenberg-Marquardt method converged. --")
                result.commitIteration()
                break

            delta_lower_lam = self.calculateDelta(V, r, p, lam / self.nu)
            delta_prev_lam = self.calculateDelta(V, r, p, lam)
            delta_higher_lam = self.calculateDelta(V, r, p, lam * self.nu)

            evals = evaluator.evaluate([
                guess + delta_lower_lam, guess + delta_prev_lam,
                guess + delta_higher_lam
            ])
            evalvecs = self.measurementToNumpyArrayConverter(evals, target)

            S_lower_lam = None if evalvecs[0] is None else 0.5 * (
                evalvecs[0] - targetdata).dot(evalvecs[0] - targetdata)
            S_prev_lam = None if evalvecs[1] is None else 0.5 * (
                evalvecs[1] - targetdata).dot(evalvecs[1] - targetdata)
            S_higher_lam = None if evalvecs[2] is None else 0.5 * (
                evalvecs[2] - targetdata).dot(evalvecs[2] - targetdata)

            found = False
            if S_lower_lam is None:
                result.log("\t lam = " + str(lam / self.nu) + ": " +
                           evals[0].reason)
            else:
                result.log("\t lam = " + str(lam / self.nu) + ": f=" +
                           str(S_lower_lam))

            if S_prev_lam is None:
                result.log("\t lam = " + str(lam) + ": " + evals[1].reason)
            else:
                result.log("\t lam = " + str(lam) + ": f=" + str(S_prev_lam))

            if S_higher_lam is None:
                result.log("\t lam = " + str(lam * self.nu) + ": " +
                           evals[2].reason)
            else:
                result.log("\t lam = " + str(lam * self.nu) + ": f=" +
                           str(S_higher_lam))

            if S_lower_lam is not None and S_lower_lam <= S:
                lam = lam / self.nu
                new_S = S_lower_lam
                nextguess = guess + delta_lower_lam
            elif S_prev_lam is not None and S_prev_lam <= S:
                new_S = S_prev_lam
                nextguess = guess + delta_prev_lam
            elif S_higher_lam is not None and S_higher_lam < S:
                lam = lam * self.nu
                new_S = S_higher_lam
                nextguess = guess + delta_higher_lam
            else:
                for zl in range(self.P_iteration_count):
                    points = []
                    lams = []
                    for z in range(self.P):
                        # remember the lambda used for each trial point so
                        # the logging and the update below stay consistent
                        new_lam = lam * self.nu**(zl * self.P + z)
                        lams.append(new_lam)
                        delta = self.calculateDelta(V, r, p, new_lam)
                        points.append(guess + delta)

                    evals = evaluator.evaluate(points)
                    evalvecs = self.measurementToNumpyArrayConverter(
                        evals, target)

                    costs = [
                        None if x is None else 0.5 *
                        (x - targetdata).dot(x - targetdata) for x in evalvecs
                    ]

                    for z in range(self.P):
                        if costs[z] is None:
                            result.log("\t lam = " + str(lams[z]) + ": " +
                                       evals[z].reason)
                        else:
                            result.log("\t lam = " + str(lams[z]) + ": f=" +
                                       str(costs[z]))
                    for z in range(self.P):
                        if costs[z] is not None and costs[z] < S:
                            lam = lams[z]
                            new_S = costs[z]
                            nextguess = points[z]
                            found = True
                    if found:
                        break
                if not found:
                    result.log(
                        "-- Levenberg-Marquardt method did not converge. --")
                    result.commitIteration()
                    result.log(evaluator.getStatistics())
                    result.save()
                    return result

            result.log("[" + str(i) + "] best lam was = " + str(lam) +
                       " with f=" + str(new_S))

            result.addMetric("lambda", lam)
            result.addMetric("residualnorm_new", new_S)
            result.addMetric("reduction", new_S / S)

            result.commitIteration()

            guess = nextguess

        else:
            result.log("-- Levenberg-Marquardt method did not converge. --")

        result.log(evaluator.getStatistics())
        result.save()
        return result
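
# calculateDelta is not shown in this listing; a plausible sketch, assuming
# it computes the standard Levenberg-Marquardt step from the Jacobian V,
# the residual vector r, the parameter count p and the damping factor lam:
import numpy as np

def lm_delta_sketch(V, r, p, lam):
    # solve the damped normal equations (V^T V + lam*I) delta = -V^T r
    A = V.T.dot(V) + lam * np.eye(p)
    return np.linalg.solve(A, -V.T.dot(r))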
    def run(self, evaluator, initial_parameters, target, result=Result()):

        guess = initial_parameters

        evaluator.setResultObject(result)

        result.addRunMetadata("target", target)
        result.addRunMetadata("optimizertype", type(self).__name__)
        result.addRunMetadata("epsilon", self.finite_differencing_epsilon)
        result.addRunMetadata("differencing", self.differencing.value)
        result.addRunMetadata("fixedparameters", evaluator.fixedparameters)
        result.addRunMetadata("parametermanager", self.parametermanager)

        result.log("-- Starting scipy optimization. --")

        targetdata = target.getNumpyArray()

        iteration_count = [0]
        last_S = [-1]

        # assemble bounds
        upper = []
        lower = []

        for p in self.parametermanager.parameters:

            if p.maximumValue is None:
                upper.append(np.inf)
            else:
                # shrink the upper bound slightly so that finite differencing
                # for the Jacobian still has room below the bound
                upper.append(p.optimizationSpaceUpperBound /
                             (1 + self.finite_differencing_epsilon))

            if p.minimumValue is None:
                lower.append(-np.inf)
            else:
                lower.append(p.optimizationSpaceLowerBound)

        bounds = scipy.optimize.Bounds(lower, upper)

        # define the callbacks for scipy
        def scipy_function(x):
            result.log("\tEvaluating cost function at x=" + str(x))
            evaluation = evaluator.evaluate([x], True,
                                            "function-evaluation")[0]
            if isinstance(evaluation, ErroredEvaluation):
                result.log("Got a ErroredEvaluation: " + evaluation.reason)
                result.log(evaluator.getStatistics())
                result.save()
                exit()

            measurement = evaluation.getNumpyArrayLike(target)
            r = measurement - targetdata
            S = 0.5 * r.dot(r)

            result.log("\t cost function is " + str(S))

            result.addMetric("parameters", x)
            result.addMetric("residualnorm", S)
            result.addMetric("measurement", measurement)
            result.addMetric("measurementEvaluation", evaluation)
            result.addMetric("residuals", r)

            if (last_S[0] != -1):
                result.addMetric("reduction", S / last_S[0])

            last_S[0] = S

            # https://stackoverflow.com/a/47443343

            if self.callback_root:
                return self.callback_scaling * np.sqrt(S)
            else:
                return self.callback_scaling * S

        def scipy_jacobi(x):
            result.log("\tEvaluating jacobi matrix at at x=" + str(x))
            jacobi_result = self.getJacobiMatrix(x, evaluator, target, result)
            if jacobi_result is None:
                result.log(
                    "Error calculating Jacobi matrix, UG run did not finish")
                result.log(evaluator.getStatistics())
                result.save()
                return

            V, measurementEvaluation = jacobi_result
            result.addMetric("jacobian", V)
            V = V.transpose()
            measurement = measurementEvaluation.getNumpyArrayLike(target)
            r = (measurement - targetdata)
            grad = V.dot(r)
            return grad

        def scipy_callback(xk):

            iteration_count[0] += 1

            result.log("[" + str(iteration_count[0]) + "]: parameters=" +
                       str(xk))

            result.commitIteration()
            return False

        scipy_result = scipy.optimize.minimize(fun=scipy_function,
                                               x0=guess,
                                               jac=scipy_jacobi,
                                               bounds=bounds,
                                               callback=scipy_callback,
                                               method=self.opt_method)

        result.log("result is " + str(scipy_result))

        result.log(evaluator.getStatistics())
        result.save()

        return result
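
# A self-contained toy analogue of the scipy.optimize.minimize call above
# (illustrative only: a quadratic cost and its exact gradient stand in for
# the UG-backed cost-function and Jacobian callbacks):
import numpy as np
import scipy.optimize

def toy_cost(x):
    return 0.5 * np.sum((x - 1.0)**2)

def toy_grad(x):
    return x - 1.0

toy_res = scipy.optimize.minimize(fun=toy_cost,
                                  x0=np.zeros(2),
                                  jac=toy_grad,
                                  bounds=scipy.optimize.Bounds([-2, -2], [2, 2]),
                                  method="L-BFGS-B")
print(toy_res.x)  # -> approximately [1.0, 1.0]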
    def run(self, evaluator, initial_parameters, target, result=Result()):

        guess = initial_parameters

        evaluator.setResultObject(result)

        result.addRunMetadata("target", target)
        result.addRunMetadata("optimizertype", type(self).__name__)
        result.addRunMetadata("epsilon", self.finite_differencing_epsilon)
        result.addRunMetadata("differencing", self.differencing.value)
        result.addRunMetadata("fixedparameters", evaluator.fixedparameters)
        result.addRunMetadata("parametermanager", evaluator.parametermanager)

        result.log("-- Starting scipy optimization. --")

        targetdata = target.getNumpyArray()

        # assemble bounds
        bounds = ([], [])

        for p in self.parametermanager.parameters:

            if p.maximumValue is None:
                bounds[1].append(np.inf)
            else:
                bounds[1].append(p.optimizationSpaceUpperBound /
                                 (1 + self.finite_differencing_epsilon))

            if p.minimumValue is None:
                bounds[0].append(-np.inf)
            else:
                bounds[0].append(p.optimizationSpaceLowerBound)

        # define the callbacks for scipy
        def scipy_fun(x):
            evaluation = evaluator.evaluate([x], True,
                                            "function-evaluation")[0]
            if isinstance(evaluation, ErroredEvaluation):
                result.log("Got a ErroredEvaluation: " + evaluation.reason)
                result.log(evaluator.getStatistics())
                return

            return evaluation.getNumpyArrayLike(target) - targetdata

        def jac_fun(x):
            jacobi_result = self.getJacobiMatrix(x, evaluator, target, result)
            if jacobi_result is None:
                result.log(
                    "Error calculating Jacobi matrix, UG run did not finish")
                result.log(evaluator.getStatistics())
                return

            V, measurementEvaluation = jacobi_result
            return V

        scipy_result = scipy.optimize.least_squares(scipy_fun,
                                                    guess,
                                                    jac=jac_fun,
                                                    bounds=bounds)

        result.log("point is " + str(scipy_result.x))
        result.log("cost is " + str(scipy_result.cost))

        result.log(evaluator.getStatistics())
        result.save()

        return result
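
# Toy analogue of the scipy.optimize.least_squares call above: unlike
# minimize, the callback returns the residual vector itself rather than a
# scalar cost (illustrative only):
import numpy as np
import scipy.optimize

def toy_residual(x):
    return np.array([x[0] - 1.0, 10.0 * (x[1] - x[0]**2)])

toy_res = scipy.optimize.least_squares(toy_residual,
                                       np.zeros(2),
                                       bounds=([-2, -2], [2, 2]))
print(toy_res.x, toy_res.cost)  # x near [1.0, 1.0], cost near 0.0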
    def run(self, evaluator, initial_parameters, target, result=Result()):

        guess = initial_parameters

        evaluator.resultobj = result

        result.addRunMetadata("target", target)
        result.addRunMetadata("optimizertype", type(self).__name__)
        result.addRunMetadata("linesearchmethod",
                              type(self.linesearchmethod).__name__)
        result.addRunMetadata("epsilon", self.finite_differencing_epsilon)
        result.addRunMetadata("differencing", self.differencing.value)
        result.addRunMetadata("fixedparameters", evaluator.fixedparameters)
        result.addRunMetadata("parametermanager", evaluator.parametermanager)

        result.log("-- Starting newton method. --")

        targetdata = target.getNumpyArray()

        last_S = -1
        first_S = -1

        for i in range(self.maxiterations):

            jacobi_result = self.getJacobiMatrix(guess, evaluator, target,
                                                 result)
            if jacobi_result is None:
                result.log(
                    "Error calculating Jacobi matrix, UG run did not finish")
                result.log(evaluator.getStatistics())
                result.save()
                return

            V, measurementEvaluation = jacobi_result
            measurement = measurementEvaluation.getNumpyArrayLike(target)

            r = measurement - targetdata

            S = 0.5 * r.dot(r)

            # save the residualnorm S for calculation of the relative reduction
            if first_S == -1:
                first_S = S

            n = len(targetdata)
            p = len(guess)
            dof = n - p

            # calculate s^2 = residual mean square / variance estimate (p.6 Bates/Watts)
            variance = None if dof == 0 else S / dof

            result.addMetric("residuals", r)
            result.addMetric("residualnorm", S)
            result.addMetric("parameters", guess)
            result.addMetric("jacobian", V)
            result.addMetric("variance", variance)
            result.addMetric("measurement", measurement)
            result.addMetric("measurementEvaluation", measurementEvaluation)

            if (last_S != -1):
                result.addMetric("reduction", S / last_S)

            result.log("[" + str(i) + "]: x=" + str(guess) +
                       ", residual norm S=" + str(S))

            # calculate the steepest-descent step direction: the negative
            # gradient -J^T r of f = 1/2 * sum(r^2)

            delta = -V.transpose().dot(r)

            result.log("stepdirection is " + str(delta))

            # cancel the optimization when the reduction of the norm of the residuals is below the threshold
            if S / first_S < self.minreduction:
                result.log("-- Gradient descent method converged. --")
                result.commitIteration()
                break

            # do linesearch in the steepest-descent search direction
            nextguess = self.linesearchmethod.doLineSearch(
                delta, guess, target, V, r, result)[0]

            if (nextguess is None):
                result.log("-- Gradient descent method did not converge. --")
                result.commitIteration()
                result.log(evaluator.getStatistics())
                result.save()
                return result

            result.commitIteration()

            guess = nextguess
            last_S = S

        else:
            result.log("-- Gradient descent method did not converge. --")

        result.log(evaluator.getStatistics())
        result.save()
        return result
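
# The step direction above is plain steepest descent: delta = -J^T r, the
# negative gradient of f = 1/2 * sum(r^2). A small numerical illustration of
# how it differs from the Gauss-Newton direction (illustrative values):
import numpy as np

V_ = np.array([[1.0, 0.0], [1.0, 1.0], [1.0, 2.0]])  # Jacobian
r_ = np.array([0.1, -0.2, 0.3])                      # residuals

sd_dir = -V_.T.dot(r_)                                 # steepest descent
gn_dir = -np.linalg.solve(V_.T.dot(V_), V_.T.dot(r_))  # Gauss-Newton
print(sd_dir, gn_dir)  # the two directions generally differ, here even in sign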
    def run(self, evaluator, initial_parameters, target, result=Result()):

        guess = initial_parameters

        evaluator.resultobj = result

        result.addRunMetadata("target", target)
        result.addRunMetadata("optimizertype", type(self).__name__)
        result.addRunMetadata("linesearchmethod",
                              type(self.linesearchmethod).__name__)
        result.addRunMetadata("epsilon", self.finite_differencing_epsilon)
        result.addRunMetadata("differencing", self.differencing.value)
        result.addRunMetadata("fixedparameters", evaluator.fixedparameters)
        result.addRunMetadata("parametermanager", evaluator.parametermanager)

        result.log("-- Starting newton method. --")

        targetdata = target.getNumpyArray()

        last_S = -1
        first_S = -1

        for i in range(self.maxiterations):

            jacobi_result = self.getJacobiMatrix(guess, evaluator, target,
                                                 result)
            if jacobi_result is None:
                result.log(
                    "Error calculating Jacobi matrix, UG run did not finish")
                result.log(evaluator.getStatistics())
                result.save()
                return

            V, measurementEvaluation = jacobi_result
            measurement = measurementEvaluation.getNumpyArrayLike(target)

            r = measurement - targetdata

            S = 0.5 * r.dot(r)

            # save the residualnorm S for calculation of the relative reduction
            if first_S == -1:
                first_S = S

            n = len(targetdata)
            p = len(guess)
            dof = n - p

            # calculate s^2 = residual mean square / variance estimate (p.6 Bates/Watts)
            variance = None if dof == 0 else S / dof

            result.addMetric("residuals", r)
            result.addMetric("residualnorm", S)
            result.addMetric("parameters", guess)
            result.addMetric("jacobian", V)
            result.addMetric("variance", variance)
            result.addMetric("measurement", measurement)
            result.addMetric("measurementEvaluation", measurementEvaluation)

            if (last_S != -1):
                result.addMetric("reduction", S / last_S)

            result.log("[" + str(i) + "]: x=" + str(guess) +
                       ", residual norm S=" + str(S))

            # calculate Gauss-Newton step direction (p. 40)
            Q1, R1 = np.linalg.qr(V, mode='reduced')
            w = Q1.transpose().dot(r)
            delta = -np.linalg.solve(R1, w)

            result.log("stepdirection is " + str(delta))

            # inverse of the Hessian approximation: (X^T X)^-1 = (R1^T R1)^-1
            hessian = np.linalg.inv(np.matmul(np.transpose(R1), R1))
            result.addMetric("hessian", hessian)

            # construct correlation matrix (see p. 22 of Bates/Watts)
            R1inv = np.linalg.inv(R1)
            Dinv = np.diag(1 / np.sqrt(np.diag(hessian)))
            L = np.matmul(Dinv, R1inv)
            C = np.matmul(L, np.transpose(L))
            result.addMetric("correlation", C)

            # variance-based statistics are undefined when dof == 0
            if variance is not None:
                covariance_matrix = variance * hessian
                result.addMetric("covariance", covariance_matrix)

                # calculate standard error for the parameters (p.21)
                s = np.sqrt(variance)
                errors = s * np.linalg.norm(R1inv, axis=1)
                result.addMetric("errors", errors)

            # cancel the optimization when the reduction of the norm of the
            # residuals is below the threshold
            if (S / first_S < self.minreduction):
                result.log("-- Newton method converged. --")
                result.commitIteration()
                break

            # do linesearch in the gauss-newton search direction
            nextguess = self.linesearchmethod.doLineSearch(
                delta, guess, target, V, r, result)[0]

            if (nextguess is None):
                result.log("-- Newton method did not converge. --")
                result.commitIteration()
                result.log(evaluator.getStatistics())
                result.save()
                return result

            result.commitIteration()

            guess = nextguess
            last_S = S

        else:
            result.log("-- Newton method did not converge. --")

        result.log(evaluator.getStatistics())
        result.save()
        return result
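
# Sanity check for the QR-based step above: with V = Q1 R1 (reduced QR),
# solving R1 * delta = -Q1^T r reproduces the Gauss-Newton step from the
# normal equations, delta = -(V^T V)^-1 V^T r (illustrative values):
import numpy as np

V_ = np.array([[1.0, 0.0], [1.0, 1.0], [1.0, 2.0]])
r_ = np.array([0.1, -0.2, 0.3])

Q1_, R1_ = np.linalg.qr(V_, mode='reduced')
delta_qr = -np.linalg.solve(R1_, Q1_.T.dot(r_))
delta_ne = -np.linalg.solve(V_.T.dot(V_), V_.T.dot(r_))
print(np.allclose(delta_qr, delta_ne))  # -> True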