Example #1
def clone(self):
    """Return a deep copy of this Network: each array is copied via numpy_copy
    (presumably numpy.copy), so the clone shares no storage with the original."""
    network = Network()
    network.inputs = [numpy_copy(a) for a in self.inputs]
    network.activations = [numpy_copy(a) for a in self.activations]
    network.weights = [numpy_copy(a) for a in self.weights]
    network.biases = [numpy_copy(a) for a in self.biases]
    network.learning_rate = self.learning_rate
    network.name = self.name
    return network
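A quick way to see what clone buys you is to mutate the copy and check the original. This is a minimal sketch, assuming the Network class from this example is in scope, constructs with no arguments (as clone itself does), and that numpy_copy aliases numpy.copy:

import numpy as np

net = Network()                      # assumes the no-argument constructor used in clone above
net.inputs, net.activations, net.biases = [], [], []
net.weights = [np.ones((2, 2))]
net.learning_rate, net.name = 0.1, 'original'

copy_net = net.clone()
copy_net.weights[0][0, 0] = 99.0     # mutate the clone only
print(net.weights[0][0, 0])          # 1.0 -- the arrays were copied, not shared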
Example #2
import numpy as np
from numpy import copy as numpy_copy  # assumed alias; these examples use numpy_copy for numpy.copy


def finite_difference_jacobian(residual_func, residual_value, state, offset_rel=1.e-5, offset_abs=1.e-8):
    """
    Compute a simple one-sided (forward) finite difference approximation to the Jacobian of a residual function.

    Parameters
    ----------
    residual_func : callable f(q)->r
        residual function of the state vector, r(q)
    residual_value : np.ndarray
        value of the residual function at the specified state vector
    state : np.ndarray
        the state vector
    offset_rel : float
        the relative contribution to the finite difference delta (optional, default: 1e-5)
    offset_abs : float
        the absolute contribution to the finite difference delta (optional, default: 1e-8)

    Returns
    -------
    j : np.ndarray
        the approximate Jacobian matrix

    """
    neq = state.size
    j = np.empty((neq, neq))
    for i in range(neq):
        # perturb the i-th state entry by a blended relative/absolute offset
        state_offset = numpy_copy(state)
        offset = offset_rel * np.abs(state[i]) + offset_abs
        state_offset[i] += offset
        # forward difference: column i of J is (r(q + h*e_i) - r(q)) / h
        j[:, i] = (residual_func(state_offset) - residual_value) / offset
    return j
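A minimal usage sketch (the residual function and state here are made up for illustration): approximate the Jacobian of a small nonlinear residual and compare it against the analytic answer.

import numpy as np

def residual(q):
    # toy residual: r0 = q0^2 - q1, r1 = q0 + q1
    return np.array([q[0] ** 2 - q[1], q[0] + q[1]])

state = np.array([2.0, 1.0])
j_fd = finite_difference_jacobian(residual, residual(state), state)
j_exact = np.array([[2.0 * state[0], -1.0],
                    [1.0, 1.0]])
print(np.max(np.abs(j_fd - j_exact)))  # small, on the order of the finite difference offsets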
Example #3
    def __call__(self,
                 residual_method,
                 setup_method,
                 solve_method,
                 initial_guess,
                 initial_rhs):
        """
        Solve a nonlinear problem and return the result.

        Parameters
        ----------
        residual_method : callable f(x)->(res,rhs)
            residual function of the solution that returns both the residual and the right-hand side of an ODE;
            to use this on a non-ODE problem, simply have your residual method return (residual, None)
        setup_method : callable f(x)
            setup function of the solution for the linear projector (e.g., Jacobian evaluation and factorization)
        solve_method : callable f(x)->b
            linear projector solver function of the residual alone,
            which returns the state update, the linear solver iteration count, and whether or not the linear solver converged
        initial_guess : np.ndarray
            initial guess for the solution
        initial_rhs : np.ndarray
            ODE right-hand side for the problem at the initial guess (to avoid re-evaluation)

        Returns
        -------
        sol : np.ndarray
            solution to the nonlinear problem

        """

        solution = numpy_copy(initial_guess)
        self._check_for_naninf(solution, 'NaN or Inf detected in Simple Newton Solve: in the initial solution!')
        self._run_custom_solution_check(solution, 'In the initial solution')

        this_is_a_scalar_problem = solution.size == 1
        # norm is presumably numpy.linalg.norm, imported at the module level
        norm_method = abs if this_is_a_scalar_problem else lambda x: norm(x, ord=self.norm_order)

        residual, rhs = residual_method(solution, existing_rhs=initial_rhs, evaluate_new_rhs=False)
        self._check_for_naninf(residual, 'NaN or Inf detected in Simple Newton Solve: in the initial residual!')

        projector_setups = 0
        total_linear_iter = 0

        for iteration_count in range(1, self.max_nonlinear_iter + 1):
            # optionally refresh the linear projector (e.g., re-evaluate and refactorize the Jacobian)
            if self.evaluate_jacobian_every_iter:
                setup_method(solution)
                projector_setups += 1
            # solve the linearized system for the Newton update
            dstate, this_linear_solver_iter, linear_converged = solve_method(residual)

            debug_string = f'On iteration {iteration_count}, linear solve convergence: ' \
                           f'{linear_converged} in {this_linear_solver_iter} iterations'

            self._check_for_naninf(dstate,
                                   'NaN or Inf detected in Simple Newton Solve: solution update check! ' + debug_string)
            total_linear_iter += this_linear_solver_iter
            solution -= dstate
            self._check_for_naninf(solution,
                                   'NaN or Inf detected in Simple Newton Solve: solution check! ' + debug_string)
            self._run_custom_solution_check(solution, debug_string)

            residual, rhs = residual_method(solution, evaluate_new_rhs=True)
            self._check_for_naninf(residual,
                                   'NaN or Inf detected in Simple Newton Solve: residual check! ' + debug_string)

            if norm_method(residual * self.norm_weighting) < self.tolerance:
                return SolverOutput(slow_convergence=iteration_count > self.slowness_detection_iter,
                                    solution=solution,
                                    rhs_at_converged=rhs,
                                    iter=iteration_count,
                                    liter=total_linear_iter,
                                    converged=True,
                                    projector_setups=projector_setups)

        if self.must_converge:
            raise ValueError('Simple Newton method did not converge and must_converge=True!')
        else:
            return SolverOutput(
                solution=solution,
                rhs_at_converged=rhs,
                iter=self.max_nonlinear_iter,
                liter=total_linear_iter,
                converged=False,
                slow_convergence=True,
                projector_setups=projector_setups)
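To see how residual_method, setup_method, and solve_method fit together, here is a self-contained sketch of the same setup/solve/update loop. The function newton_solve and its defaults are hypothetical, not the class above; it reuses finite_difference_jacobian from Example #2 and a dense numpy solve:

import numpy as np

def newton_solve(residual_func, initial_guess, tolerance=1e-10, max_iter=20):
    # hypothetical standalone analogue of the loop above
    solution = np.copy(initial_guess)
    residual = residual_func(solution)
    for _ in range(max_iter):
        # "setup_method": build the linear projector, here a dense finite difference Jacobian
        jacobian = finite_difference_jacobian(residual_func, residual, solution)
        # "solve_method": get the Newton update from the linearized system J * dstate = residual
        dstate = np.linalg.solve(jacobian, residual)
        solution -= dstate
        residual = residual_func(solution)
        if np.linalg.norm(residual) < tolerance:
            return solution
    raise ValueError('Newton method did not converge')

# solve x0^2 + x1^2 - 2 = 0 and x0 - x1 = 0; the root is (1, 1)
root = newton_solve(lambda q: np.array([q[0] ** 2 + q[1] ** 2 - 2.0, q[0] - q[1]]),
                    np.array([2.0, 0.5]))
print(root)  # approximately [1. 1.]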
Example #4
def run_encoding(self, data, encoding_layer):
    """Run the network on data and return a flat copy of the activations at encoding_layer."""
    self.run(data)
    # transpose (presumably numpy.transpose) turns the column of activations into a row;
    # [0] extracts it as a 1-d array, which is then copied so the caller owns the memory
    return numpy_copy(transpose(self.activations[encoding_layer])[0])
Example #5
def doubled(x, *args, **kwargs):
    # 'fun' is not defined here; it is captured from an enclosing scope
    # (see the wrapper sketch below)
    output = fun(x, *args, **kwargs)
    return output, numpy_copy(output)
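This snippet is only meaningful as a closure over some function fun, e.g. inside a wrapper that returns both a result and an independent copy of it. A plausible enclosing wrapper (the name duplicate_output is hypothetical) might look like:

import numpy as np

def duplicate_output(fun):
    # hypothetical wrapper producing the closure above
    def doubled(x, *args, **kwargs):
        output = fun(x, *args, **kwargs)
        return output, np.copy(output)
    return doubled

result, result_copy = duplicate_output(lambda x: x ** 2)(np.array([1.0, 2.0]))
result_copy[0] = -1.0
print(result)  # [1. 4.] -- unaffected by mutating the copy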