Example No. 1
 # Wrapper installed in place of a parameterized test method; `base`,
 # `base_method`, and `param2` are closure variables from the enclosing
 # factory: the original test class, the original test method, and the
 # parameter dict for this generated test case.
 def new_method(self, *args, **kwargs):
     try:
         return base_method(self, *args, **kwargs)
     except unittest.SkipTest:
         raise
     except Exception as e:
         s = six.StringIO()
         s.write('Parameterized test failed.\n\n')
         s.write('Base test method: {}.{}\n'.format(
             base.__name__, base_method.__name__))
         s.write('Test parameters:\n')
         for k, v in six.iteritems(param2):
             s.write('  {}: {}\n'.format(k, v))
         utils._raise_from(e.__class__, s.getvalue(), e)
Example No. 2
 # Same wrapper pattern as above; `base`, `base_method`, and `param` are
 # closure variables from the enclosing factory.
 def new_method(self, *args, **kwargs):
     try:
         return base_method(self, *args, **kwargs)
     except unittest.SkipTest:
         raise
     except Exception as e:
         s = six.StringIO()
         s.write('Parameterized test failed.\n\n')
         s.write('Base test method: {}.{}\n'.format(
             base.__name__, base_method.__name__))
         s.write('Test parameters:\n')
         for k, v in sorted(param.items()):
             s.write('  {}: {}\n'.format(k, v))
         # Re-raise out-of-memory errors (matched by class name) as the
         # built-in MemoryError.
         err_class = e.__class__
         if err_class.__name__ == 'OutOfMemoryError':
             err_class = MemoryError
         utils._raise_from(err_class, s.getvalue(), e)
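The wrappers above reference base, base_method, and the parameter dict as closure variables. Below is a minimal sketch of the kind of factory that could produce such a wrapper; the name make_wrapped_method and its arguments are illustrative assumptions, not the library's API, and the sketch uses plain Python 3 exception chaining instead of utils._raise_from:

import functools
import unittest


def make_wrapped_method(base, base_method, param):
    # base: the original test class; base_method: the original test method;
    # param: the parameter dict bound to this generated test case.
    @functools.wraps(base_method)
    def new_method(self, *args, **kwargs):
        try:
            return base_method(self, *args, **kwargs)
        except unittest.SkipTest:
            raise
        except Exception as e:
            lines = ['Parameterized test failed.\n']
            lines.append('Base test method: {}.{}'.format(
                base.__name__, base_method.__name__))
            lines.append('Test parameters:')
            lines.extend('  {}: {}'.format(k, v)
                         for k, v in sorted(param.items()))
            # Re-raise as the same exception class, keeping the original
            # exception as the cause.
            raise e.__class__('\n'.join(lines)) from e
    return new_method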
Example No. 3
def check_double_backward(func,
                          x_data,
                          y_grad,
                          x_grad_grad,
                          params=(),
                          params_grad_grad=(),
                          eps=1e-3,
                          atol=1e-4,
                          rtol=1e-3,
                          no_grads=None,
                          dtype=None,
                          detect_nondifferentiable=False):
    """Test twice differentiation of a given procedure.

    This function automatically checks if the backward procedure of ``func``
    is correctly implemented for further differentiation. It first computes the
    gradient of ``func`` w.r.t. its inputs in the same way as
    :func:`~chainer.gradient_check.check_backward`. This function then further
    invokes the backward procedure against the gradient variables, starting
    from the initial gradient given by ``x_grad_grad``. It also computes the
    second gradient using :func:`~chainer.gradient_check.numerical_grad`. The
    resulting gradients are compared to confirm if the second-order gradients
    are approximately correct.

    Note that this function **DOES NOT** check if the first-order
    differentiation is correct; the numerical gradient assumes that the
    first-order gradient given by the usual :meth:`chainer.Variable.backward`
    is correct. The implementation of each differentiable function should be
    tested by :func:`~chainer.gradient_check.check_backward` first, and then
    should be tested by this function if necessary.

    For the details of the arguments, see
    :func:`~chainer.gradient_check.check_backward`. The additional arguments
    ``x_grad_grad`` and ``params_grad_grad`` are (tuples of)
    :class:`~chainer.Variable` (s) that include the initial gradient
    corresponding to the first-order gradient of each input and parameter. Note
    that the default error tolerance ``atol`` and ``rtol`` are slightly larger
    than those of :func:`~chainer.gradient_check.check_backward` because the
    numerical gradients of the second order differentiation are less accurate
    than those of the first order gradients.

    """
    # Rename variables
    xs = x_data
    gys = y_grad
    ggxs = x_grad_grad
    ggparams = params_grad_grad
    no_gxs = no_grads
    del x_data
    del y_grad
    del x_grad_grad
    del params_grad_grad
    del no_grads

    xs = _as_tuple(xs)
    params = _as_tuple(params)
    gys = _as_tuple(gys)
    ggxs = _as_tuple(ggxs)
    ggparams = _as_tuple(ggparams)
    n_x = len(xs)

    first_order_no_gxs = [x.dtype.kind != 'f' for x in xs]

    def first_order_grad(*inputs):
        xs = inputs[:n_x]
        gys = inputs[n_x:]

        ys = _as_tuple(func(*xs))

        # `gys` (inputs to `first_order_grad` forward function) may have been
        # casted to float64 by `numerical_grad`. For certain functions demoting
        # the dtypes (e.g. `F.cast` that casts to float16), the dtypes of `ys`
        # (e.g. outputs of `F.cast`) and `gys` (e.g. given by `numerical_grad`)
        # may mismatch and we need to align those dtypes here.
        gys = [
            None if gy is None else chainer.functions.cast(gy, y.dtype)
            for y, gy in zip(ys, gys)
        ]

        _check_outputs_and_grad_outputs(ys, gys)

        chainer.backward(ys, gys, enable_double_backprop=True)

        gxs = []
        errors = []
        for i, (no_gx, x) in enumerate(six.moves.zip(first_order_no_gxs, xs)):
            if no_gx:
                if x.grad is not None:
                    errors.append(
                        '[{}]: Gradient was calculated while expected to not.'.
                        format(i))
            else:
                if x.grad is None:
                    gxs.append(None)
                else:
                    gxs.append(x.grad_var)

        if len(errors) > 0:
            f = six.StringIO()
            f.write('There are errors retrieving first-order gradients:\n')
            f.write('Inputs: {}\n'.format(utils._format_array_props(xs)))
            f.write('Skip: {}\n'.format(', '.join(
                str(no_gx) for no_gx in first_order_no_gxs)))
            f.write('Errors:\n')
            for error in errors:
                f.write('{}\n'.format(error))
            raise RuntimeError(f.getvalue())

        return tuple(gxs + [p.grad_var for p in params])

    inputs = xs + gys
    grad_grad = ggxs + ggparams
    try:
        check_backward(first_order_grad,
                       inputs,
                       grad_grad,
                       params=params,
                       eps=eps,
                       atol=atol,
                       rtol=rtol,
                       no_grads=no_gxs,
                       dtype=dtype,
                       detect_nondifferentiable=detect_nondifferentiable)
    except AssertionError as e:
        f = six.StringIO()
        f.write('check_double_backward failed '
                '(eps={} atol={} rtol={})\n'.format(eps, atol, rtol))
        for i, x in enumerate(xs):
            f.write('input[{}]:\n'.format(i))
            f.write('{}\n'.format(x))
        for i, gy in enumerate(gys):
            f.write('grad_output[{}]:\n'.format(i))
            f.write('{}\n'.format(gy))
        for i, ggx in enumerate(ggxs):
            f.write('grad_grad_input[{}]:\n'.format(i))
            f.write('{}\n'.format(ggx))
        for i, ggp in enumerate(ggparams):
            f.write('grad_grad_param[{}]:\n'.format(i))
            f.write('{}\n'.format(ggp))
        f.write('\n')
        f.write(str(e))
        utils._raise_from(AssertionError, f.getvalue(), e)
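The docstring above describes how the second-order check feeds a first-order gradient routine back into check_backward. Here is a minimal usage sketch, assuming a standard Chainer environment; the function and array shapes are chosen arbitrarily for illustration:

import numpy

import chainer.functions as F
from chainer import gradient_check

x = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float32)
gy = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float32)
ggx = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float32)

# Check that the backward pass of F.sigmoid is itself differentiable and that
# its second-order gradients agree with numerical differentiation.
gradient_check.check_double_backward(
    F.sigmoid, x, gy, ggx, eps=1e-3, atol=1e-4, rtol=1e-3)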
Example No. 4
 # Raise `cls` with `message`, chaining `exc` as the cause when one is given.
 def fail(cls, message, exc=None):
     if exc is not None:
         utils._raise_from(cls, message, exc)
     raise cls(message)
Example No. 5
 # Raise FunctionTestError with `message`, chaining `exc` as the cause when
 # one is given.
 def fail(message, exc=None):
     if exc is not None:
         utils._raise_from(FunctionTestError, message, exc)
     raise FunctionTestError(message)
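The one-argument signature suggests this helper is exposed as a static method on FunctionTestError. A hypothetical usage sketch follows; _check_forward, actual, and expected are illustrative names, and FunctionTestError is assumed to be in scope as in the snippet above:

import numpy


def _check_forward(actual, expected):
    try:
        numpy.testing.assert_allclose(actual, expected, rtol=1e-4)
    except AssertionError as e:
        # Convert the low-level assertion failure into a FunctionTestError,
        # keeping the original exception as the cause.
        FunctionTestError.fail('forward output mismatch', e)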
Example No. 6
def check_double_backward(func,
                          x_data,
                          y_grad,
                          x_grad_grad,
                          params=(),
                          params_grad_grad=(),
                          eps=1e-3,
                          atol=1e-4,
                          rtol=1e-3,
                          no_grads=None,
                          dtype=None,
                          detect_nondifferentiable=False):
    """Test twice differentiation of a given procedure.

    This function automatically checks if the backward procedure of ``func``
    is correctly implemented for further differentiation. It first computes the
    gradient of ``func`` w.r.t. its inputs in the same way as
    :func:`~chainer.gradient_check.check_backward`. This function then further
    invokes the backward procedure against the gradient variables, starting
    from the initial gradient given by ``x_grad_grad``. It also computes the
    second gradient using :func:`~chainer.gradient_check.numerical_grad`. The
    resulting gradients are compared to confirm if the second-order gradients
    are approximately correct.

    Note that this function **DOES NOT** check if the first-order
    differentiation is correct; the numerical gradient assumes that the
    first-order gradient given by the usual :meth:`chainer.Variable.backward`
    is correct. The implementation of each differentiable function should be
    tested by :func:`~chainer.gradient_check.check_backward` first, and then
    should be tested by this function if necessary.

    For the details of the arguments, see
    :func:`~chainer.gradient_check.check_backward`. The additional arguments
    ``x_grad_grad`` and ``params_grad_grad`` are (tuples of)
    :class:`~chainer.Variable` (s) that include the initial gradient
    corresponding to the first-order gradient of each input and parameter. Note
    that the default error tolerance ``atol`` and ``rtol`` are slightly larger
    than those of :func:`~chainer.gradient_check.check_backward` because the
    numerical gradients of the second order differentiation are less accurate
    than those of the first order gradients.

    """
    x_data = _as_tuple(x_data)
    params = _as_tuple(params)
    y_grad = _as_tuple(y_grad)
    x_grad_grad = _as_tuple(x_grad_grad)
    params_grad_grad = _as_tuple(params_grad_grad)
    n_x = len(x_data)

    first_order_no_grads = [x.dtype.kind != 'f' for x in x_data]

    def first_order_grad(*inputs):
        xs = inputs[:n_x]
        gys = inputs[n_x:]

        y = _as_tuple(func(*xs))
        _check_outputs_and_grad_outputs(y, gys)

        # Let all elements of y share the same creator.
        # See the comment in check_backward.
        y = _apply_grad_setter_func(y, gys)

        y.backward(enable_double_backprop=True)

        gxs = []
        errors = []
        for i, (skip, x) in enumerate(six.moves.zip(first_order_no_grads, xs)):
            if skip:
                if x.grad is not None:
                    errors.append(
                        '[{}]: Gradient was calculated while expected to not.'.
                        format(i))
            else:
                if x.grad is None:
                    gxs.append(None)
                else:
                    gxs.append(x.grad_var)

        if len(errors) > 0:
            f = six.StringIO()
            f.write('There are errors retrieving first-order gradients:\n')
            f.write('Inputs: {}\n'.format(utils._format_array_props(xs)))
            f.write('Skip: {}\n'.format(', '.join(
                str(skip) for skip in first_order_no_grads)))
            f.write('Errors:\n')
            for error in errors:
                f.write('{}\n'.format(error))
            raise RuntimeError(f.getvalue())

        return tuple(gxs + [p.grad_var for p in params])

    inputs = x_data + y_grad
    grad_grad = x_grad_grad + params_grad_grad
    try:
        check_backward(first_order_grad,
                       inputs,
                       grad_grad,
                       params=params,
                       eps=eps,
                       atol=atol,
                       rtol=rtol,
                       no_grads=no_grads,
                       dtype=dtype,
                       detect_nondifferentiable=detect_nondifferentiable)
    except AssertionError as e:
        f = six.StringIO()
        f.write('check_double_backward failed '
                '(eps={} atol={} rtol={})\n'.format(eps, atol, rtol))
        for i, x_ in enumerate(x_data):
            f.write('input[{}]:\n'.format(i))
            f.write('{}\n'.format(x_))
        for i, gy_ in enumerate(y_grad):
            f.write('grad_output[{}]:\n'.format(i))
            f.write('{}\n'.format(gy_))
        for i, ggx_ in enumerate(x_grad_grad):
            f.write('grad_grad_input[{}]:\n'.format(i))
            f.write('{}\n'.format(ggx_))
        for i, ggp_ in enumerate(params_grad_grad):
            f.write('grad_grad_param[{}]:\n'.format(i))
            f.write('{}\n'.format(ggp_))
        f.write('\n')
        f.write(str(e))
        utils._raise_from(AssertionError, f.getvalue(), e)
Example No. 7
def check_double_backward(func, x_data, y_grad, x_grad_grad, params=(),
                          params_grad_grad=(), eps=1e-3, atol=1e-4, rtol=1e-3,
                          no_grads=None, dtype=None,
                          detect_nondifferentiable=False):
    """Test twice differentiation of a given procedure.

    This function automatically checks if the backward procedure of ``func``
    is correctly implemented for further differentiation. It first computes the
    gradient of ``func`` w.r.t. its inputs in the same way as
    :func:`~chainer.gradient_check.check_backward`. This function then further
    invokes the backward procedure against the gradient variables, starting
    from the initial gradient given by ``x_grad_grad``. It also computes the
    second gradient using :func:`~chainer.gradient_check.numerical_grad`. The
    resulting gradients are compared to confirm if the second-order gradients
    are approximately correct.

    Note that this function **DOES NOT** check if the first-order
    differentiation is correct; the numerical gradient assumes that the
    first-order gradient given by the usual :meth:`chainer.Variable.backward`
    is correct. The implementation of each differentiable function should be
    tested by :func:`~chainer.gradient_check.check_backward` first, and then
    should be tested by this function if necessary.

    For the details of the arguments, see
    :func:`~chainer.gradient_check.check_backward`. The additional arguments
    ``x_grad_grad`` and ``params_grad_grad`` are (tuples of)
    :class:`~chainer.Variable` (s) that include the initial gradient
    corresponding to the first-order gradient of each input and parameter. Note
    that the default error tolerance ``atol`` and ``rtol`` are slightly larger
    than those of :func:`~chainer.gradient_check.check_backward` because the
    numerical gradients of the second order differentiation are less accurate
    than those of the first order gradients.

    """
    # Rename variables
    xs = x_data
    gys = y_grad
    ggxs = x_grad_grad
    ggparams = params_grad_grad
    no_gxs = no_grads
    del x_data
    del y_grad
    del x_grad_grad
    del params_grad_grad
    del no_grads

    xs = _as_tuple(xs)
    params = _as_tuple(params)
    gys = _as_tuple(gys)
    ggxs = _as_tuple(ggxs)
    ggparams = _as_tuple(ggparams)
    n_x = len(xs)

    first_order_no_gxs = [x.dtype.kind != 'f' for x in xs]

    def first_order_grad(*inputs):
        xs = inputs[:n_x]
        gys = inputs[n_x:]

        ys = _as_tuple(func(*xs))
        _check_outputs_and_grad_outputs(ys, gys)

        # Let all elements of y share the same creator.
        # See the comment in check_backward.
        y_backward = _apply_grad_setter_func(ys, gys)

        y_backward.backward(enable_double_backprop=True)

        gxs = []
        errors = []
        for i, (no_gx, x) in enumerate(six.moves.zip(first_order_no_gxs, xs)):
            if no_gx:
                if x.grad is not None:
                    errors.append(
                        '[{}]: Gradient was calculated while expected to not.'
                        .format(i))
            else:
                if x.grad is None:
                    gxs.append(None)
                else:
                    gxs.append(x.grad_var)

        if len(errors) > 0:
            f = six.StringIO()
            f.write('There are errors retrieving first-order gradients:\n')
            f.write('Inputs: {}\n'.format(utils._format_array_props(xs)))
            f.write('Skip: {}\n'.format(
                ', '.join(str(no_gx) for no_gx in first_order_no_gxs)))
            f.write('Errors:\n')
            for error in errors:
                f.write('{}\n'.format(error))
            raise RuntimeError(f.getvalue())

        return tuple(gxs + [p.grad_var for p in params])

    inputs = xs + gys
    grad_grad = ggxs + ggparams
    try:
        check_backward(first_order_grad, inputs, grad_grad, params=params,
                       eps=eps, atol=atol, rtol=rtol, no_grads=no_gxs,
                       dtype=dtype,
                       detect_nondifferentiable=detect_nondifferentiable)
    except AssertionError as e:
        f = six.StringIO()
        f.write('check_double_backward failed '
                '(eps={} atol={} rtol={})\n'.format(eps, atol, rtol))
        for i, x in enumerate(xs):
            f.write('input[{}]:\n'.format(i))
            f.write('{}\n'.format(x))
        for i, gy in enumerate(gys):
            f.write('grad_output[{}]:\n'.format(i))
            f.write('{}\n'.format(gy))
        for i, ggx in enumerate(ggxs):
            f.write('grad_grad_input[{}]:\n'.format(i))
            f.write('{}\n'.format(ggx))
        for i, ggp in enumerate(ggparams):
            f.write('grad_grad_param[{}]:\n'.format(i))
            f.write('{}\n'.format(ggp))
        f.write('\n')
        f.write(str(e))
        utils._raise_from(AssertionError, f.getvalue(), e)
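The docstrings above recommend verifying first-order gradients with check_backward before running this second-order check. A minimal sketch of that first step, again assuming a standard Chainer environment with an arbitrary function and shapes:

import numpy

import chainer.functions as F
from chainer import gradient_check

x = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float32)
gy = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float32)

# Check the analytically computed gradient of F.tanh against the numerical
# gradient before attempting check_double_backward.
gradient_check.check_backward(F.tanh, x, gy, eps=1e-3, atol=1e-4, rtol=1e-3)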