Example #1
    def decorator(func):
        """Decorate a function to define its custome gradient(s).

        Parameters
        ----------
        func : callable
            Function whose gradients will be assigned by grad_funcs.

        Returns
        -------
        _ : callable
            Function func with gradients specified by grad_funcs.
        """

        def func_with_grad(*args, **kwargs):
            def grad(upstream):
                grad_vals = []
                for grad_fun in grad_funcs:
                    grads = _tf.convert_to_tensor(grad_fun(*args, **kwargs))
                    if grads.ndim == 0:
                        # A Python float was converted to a 0-d tensor above,
                        # so scalar gradients are detected by rank, not type.
                        grad_val = upstream * grads
                    elif grads.ndim == 2:
                        grad_val = upstream[..., None] * grads
                    elif grads.ndim == 3:
                        grad_val = upstream[..., None, None] * grads
                    else:
                        grad_val = upstream * grads
                    grad_vals.append(grad_val)
                return tuple(grad_vals)

            return func(*args, **kwargs), grad

        return _tf.custom_gradient(func_with_grad)
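
The decorator above closes over grad_funcs, so it is presumably returned by an enclosing factory along the lines of custom_gradient(*grad_funcs); that factory name and the usage below are assumptions, shown only as a minimal runnable sketch of the pattern.

import tensorflow as tf

def custom_gradient(*grad_funcs):
    # Assumed enclosing factory: one grad_func per input of the decorated function.
    def decorator(func):
        def func_with_grad(*args, **kwargs):
            def grad(upstream):
                return tuple(
                    upstream * tf.convert_to_tensor(g(*args, **kwargs))
                    for g in grad_funcs)
            return func(*args, **kwargs), grad
        return tf.custom_gradient(func_with_grad)
    return decorator

@custom_gradient(lambda x: 2.0 * x)  # hand-written derivative of x ** 2
def square(x):
    return x ** 2

x = tf.constant(3.0)
with tf.GradientTape() as tape:
    tape.watch(x)
    y = square(x)
print(tape.gradient(y, x))  # tf.Tensor(6.0, ...)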
Example #2
def tensorflow_register(f, s_f):
    """Register a function and its sensitivity for TensorFlow.

    Args:
        f (function): Function to register.
        s_f (function): Sensitivity of `f`.

    Returns:
        function: TensorFlow primitive.
    """
    @wraps(f)
    def primitive(*args, **kw_args):
        # TODO: This assumes that the output is of the data type of the first input.
        #  Generally, this is *not* true. How to best approach this?
        y = _np_apply(f, args[0].dtype, *args, **kw_args)

        def grad(s_y):
            # TODO: This assumes that the sensitivities of the inputs are of the data
            # types of the inputs. Again, generally, this is *not* true. How to best
            # approach this?
            return _np_apply(s_f, [arg.dtype for arg in args],
                             *((s_y, y) + args), **kw_args)

        return y, grad

    return tf.custom_gradient(primitive)
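
`primitive` relies on a helper `_np_apply` that is not shown. A minimal sketch of what it might look like, assuming it simply runs the NumPy function inside the graph via tf.numpy_function with caller-supplied output dtype(s); the real helper in the source library may differ.

import numpy as np
import tensorflow as tf

def _np_apply(f, dtype, *args, **kw_args):
    # Evaluate a NumPy function on the tensor arguments; `dtype` gives the
    # output dtype(s) that tf.numpy_function should produce.
    return tf.numpy_function(lambda *a: f(*a, **kw_args), args, dtype)

x = tf.constant(0.5, dtype=tf.float64)
print(_np_apply(np.sin, tf.float64, x))  # ~0.4794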
Example #3
    def wrapped(self, *args, **kwargs):
        if not hasattr(self, '_tf_custom_gradient_wrappers'):
            self._tf_custom_gradient_wrappers = {}
        if f not in self._tf_custom_gradient_wrappers:
            # kwargs are currently only supported in eager mode.
            self._tf_custom_gradient_wrappers[f] = tf.custom_gradient(
                lambda *a: f(self, *a))
            # Eager-mode alternative that also forwards kwargs:
            # self._tf_custom_gradient_wrappers[f] = tf.custom_gradient(
            #     lambda *a, **kw: f(self, *a, **kw))
        return self._tf_custom_gradient_wrappers[f](*args)

    def wrapped(self, *args, **kwargs):
        if not hasattr(self, '_tf_custom_gradient_wrappers'):
            self._tf_custom_gradient_wrappers = {}
        if f not in self._tf_custom_gradient_wrappers:
            self._tf_custom_gradient_wrappers[f] = tf.custom_gradient(
                lambda *a, **kw: f(self, *a, **kw))
        return self._tf_custom_gradient_wrappers[f](*args, **kwargs)
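
Both variants of `wrapped` close over a free function `f`, so they are presumably the body of a method decorator that caches one tf.custom_gradient wrapper per instance. A hedged sketch of that assumed decorator and an illustrative use; the name tf_custom_gradient_method and the Squarer class are not from the original source.

import functools
import tensorflow as tf

def tf_custom_gradient_method(f):
    @functools.wraps(f)
    def wrapped(self, *args, **kwargs):
        # Cache one custom-gradient wrapper per (instance, method) pair.
        if not hasattr(self, '_tf_custom_gradient_wrappers'):
            self._tf_custom_gradient_wrappers = {}
        if f not in self._tf_custom_gradient_wrappers:
            self._tf_custom_gradient_wrappers[f] = tf.custom_gradient(
                lambda *a, **kw: f(self, *a, **kw))
        return self._tf_custom_gradient_wrappers[f](*args, **kwargs)
    return wrapped

class Squarer(tf.Module):
    @tf_custom_gradient_method
    def square(self, x):
        def grad(dy):
            return dy * 2.0 * x  # hand-written derivative of x ** 2
        return x ** 2, grad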
Example #5
    def __init__(self, step_size=0.5, horizon=5., threshold=1e-3, name='RicattiSolver'):
        super(NaiveSolver, self).__init__(name=name)
        self.step_size = tf.cast(step_size, float)
        self.horizon = tf.cast(horizon, float)
        self.threshold = tf.cast(threshold, float)
        self.solver = Euler(self.step_size, self.threshold, self.horizon)

        self._routine = tf.custom_gradient(
            lambda A, B: self._routine_without_custom_grad(self.solver, A, B))
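
For context, a minimal runnable sketch of the same construction: wrapping a bound method in tf.custom_gradient through a lambda inside __init__, so the gradient only sees the tensor arguments A and B. The class, forward rule, and gradients below are illustrative, not the actual Riccati solver.

import tensorflow as tf

class ScaledSolve:
    def __init__(self, scale=2.0):
        self.scale = tf.constant(scale)
        # Wrap the bound method so `self` is captured but the gradient
        # function only receives the tensor inputs A and B.
        self._routine = tf.custom_gradient(
            lambda A, B: self._routine_without_custom_grad(A, B))

    def _routine_without_custom_grad(self, A, B):
        value = self.scale * (A + B)
        def grad(dvalue):
            # Hand-written backward pass: d(value)/dA = d(value)/dB = scale.
            return dvalue * self.scale, dvalue * self.scale
        return value, grad

    def __call__(self, A, B):
        return self._routine(A, B)

solver = ScaledSolve()
A = tf.constant(1.0)
B = tf.constant(3.0)
with tf.GradientTape() as tape:
    tape.watch([A, B])
    out = solver(A, B)
print(tape.gradient(out, [A, B]))  # [2.0, 2.0]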
Example #6
    def wrapper(func):
        def func_with_grad(*args, **kwargs):
            def grad(upstream):
                grad_vals = []
                for grad_fun in grad_funcs:
                    grads = tf.convert_to_tensor(grad_fun(*args, **kwargs))
                    if grads.ndim == 0:
                        # A Python float was converted to a 0-d tensor above,
                        # so scalar gradients are detected by rank, not type.
                        grad_val = upstream * grads
                    elif grads.ndim == 2:
                        grad_val = upstream[..., None] * grads
                    elif grads.ndim == 3:
                        grad_val = upstream[..., None, None] * grads
                    else:
                        grad_val = upstream * grads
                    grad_vals.append(grad_val)
                return tuple(grad_vals)

            return func(*args, **kwargs), grad

        return tf.custom_gradient(func_with_grad)
Example #7
    def __call__(self, *parameters, solver_args={}):
        """Solve problem (or a batch of problems) corresponding to `parameters`

        Args:
          parameters: a sequence of tf.Tensors; the n-th Tensor specifies
                      the value for the n-th CVXPY Parameter. These Tensors
                      can be batched: if a Tensor has 3 dimensions, then its
                      first dimension is interpreted as the batch size.
          solver_args: a dict of optional arguments, to send to `diffcp`. Keys
                       should be the names of keyword arguments.

        Returns:
          a list of optimal variable values, one for each CVXPY Variable
          supplied to the constructor.
        """
        if len(parameters) != len(self.params):
            raise ValueError('A value must be provided for each CVXPY '
                             'parameter; received %d values, expected %d' % (
                                 len(parameters), len(self.params)))
        compute = tf.custom_gradient(
            lambda *parameters: self._compute(parameters, solver_args))
        return compute(*parameters)
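
This __call__ matches the TensorFlow CvxpyLayer from cvxpylayers. A usage sketch along the lines of that project's documented example (requires cvxpy, cvxpylayers, and diffcp installed; details may vary by version):

import cvxpy as cp
import tensorflow as tf
from cvxpylayers.tensorflow import CvxpyLayer

n, m = 2, 3
x = cp.Variable(n)
A = cp.Parameter((m, n))
b = cp.Parameter(m)
problem = cp.Problem(cp.Minimize(cp.pnorm(A @ x - b, p=1)), [x >= 0])

layer = CvxpyLayer(problem, parameters=[A, b], variables=[x])
A_tf = tf.Variable(tf.random.normal((m, n)))
b_tf = tf.Variable(tf.random.normal((m,)))
with tf.GradientTape() as tape:
    solution, = layer(A_tf, b_tf)          # invokes the __call__ shown above
    loss = tf.reduce_sum(solution)
grads = tape.gradient(loss, [A_tf, b_tf])  # differentiates through the solver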
def combine_value_gradient(xv, xg):
    # Apply separate flows for the value and the gradient
    return tf.stop_gradient(xv) + tf.custom_gradient(
        lambda x: [K.zeros_like(x), lambda dy: dy])(xg)
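
combine_value_gradient takes its forward value from xv while routing the gradient through xg, a straight-through-style trick; K is assumed to be tf.keras.backend. A small usage sketch:

import tensorflow as tf
from tensorflow.keras import backend as K

x = tf.constant(2.0)
with tf.GradientTape() as tape:
    tape.watch(x)
    hard = tf.round(x * 1.3)   # non-differentiable "value" path (forward only)
    soft = x * 1.3             # differentiable "gradient" path (backward only)
    y = combine_value_gradient(hard, soft)
print(y)                       # 3.0, the value of the hard path
print(tape.gradient(y, x))     # 1.3, the gradient of the soft path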