def forward_xp(self, inputs, xp):
    """Apply ``self.func`` to the sole input, pinning the output dtype."""
    arr, = inputs
    # Cast up-front so precision is not silently reduced; e.g. numpy.sqrt
    # on an int8 array would otherwise produce a float16 array.
    arr = dtype_utils.cast_if_numpy_array(xp, arr, self.out_dtype)
    with IgnoreNumpyFloatingPointError():
        result = self.func(xp, arr)
    # Cast the result as well, for the same reason.
    result = dtype_utils.cast_if_numpy_array(xp, result, self.out_dtype)
    return result,
def forward_xp(self, inputs, xp):
    """Run ``self.func`` on the single input with dtype normalization."""
    a, = inputs
    # This cast was introduced in order to avoid decreasing precision.
    # ex.) numpy.sqrt(x) becomes a float16 array where x is an int8 array.
    a = dtype_utils.cast_if_numpy_array(xp, a, self.out_dtype)
    with IgnoreNumpyFloatingPointError():
        y = self.func(xp, a)
    return dtype_utils.cast_if_numpy_array(xp, y, self.out_dtype),
def test_where_scalar_scalar(xp, cond_shape, cond_dtype, in_types, out_dtype):
    """xp.where with two scalar branches and a random boolean condition.

    The condition is generated with an explicitly seeded RandomState so the
    test is deterministic, consistent with the sibling
    ``test_where_scalar_scalar`` variant that already passes one.
    """
    cond = _random_condition(
        cond_shape, cond_dtype,
        random_state=numpy.random.RandomState(seed=0))
    cond = xp.array(cond)
    x_type, y_type = in_types
    x = x_type(0)
    y = y_type(2)
    out = xp.where(cond, x, y)
    # Align the numpy result dtype with the dtype chainerx produces.
    return dtype_utils.cast_if_numpy_array(xp, out, out_dtype)
def test_eye(xp, N, M, k, dtype_spec, device):
    """Check xp.eye, normalizing chainerx dtype specs for numpy."""
    # numpy does not understand chainerx dtype objects; pass the name.
    if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
        dtype_spec = dtype_spec.name
    out = xp.eye(N, M, k, dtype_spec)
    if dtype_spec in (None, Unspecified):
        # With no explicit dtype, match chainerx's float32 default on the
        # numpy reference side so the comparison dtypes agree.
        out = dtype_utils.cast_if_numpy_array(xp, out, 'float32')
    return out
def test_eye(xp, N, M, k, dtype_spec, device):
    """Exercise xp.eye with an optionally-unspecified dtype."""
    spec = dtype_spec
    if xp is numpy and isinstance(spec, chainerx.dtype):
        # numpy cannot consume a chainerx dtype object directly.
        spec = spec.name
    result = xp.eye(N, M, k, spec)
    if spec is None or spec is Unspecified:
        # Mirror chainerx's float32 default on the numpy reference side.
        result = dtype_utils.cast_if_numpy_array(xp, result, 'float32')
    return result
def forward_xp(self, inputs, xp):
    """Compute the dot product of the two inputs, via module or method form."""
    lhs, rhs = inputs
    # Exercise both spellings of dot depending on the parametrization.
    result = xp.dot(lhs, rhs) if self.is_module else lhs.dot(rhs)
    result = dtype_utils.cast_if_numpy_array(
        xp, result, self.chx_expected_dtype)
    return result,
def test_sqrt(xp, device, input, in_dtype, out_dtype):
    """Check xp.sqrt after casting the input to the expected output dtype."""
    # Non-finite values cannot be represented in non-float dtypes; skip.
    if (input.size > 0
            and not numpy.isfinite(input).all()
            and numpy.dtype(in_dtype).kind != 'f'):
        return chainerx.testing.ignore()
    a = xp.array(input.astype(in_dtype))
    a = dtype_utils.cast_if_numpy_array(xp, a, out_dtype)
    return xp.sqrt(a)
def test_sqrt(xp, device, input, in_dtype, out_dtype):
    """sqrt test; ignored for non-finite inputs with non-float dtypes."""
    has_nonfinite = input.size > 0 and not numpy.isfinite(input).all()
    if has_nonfinite and numpy.dtype(in_dtype).kind != 'f':
        # astype to a non-float dtype would be undefined for inf/nan.
        return chainerx.testing.ignore()
    arr = xp.array(input.astype(in_dtype))
    arr = dtype_utils.cast_if_numpy_array(xp, arr, out_dtype)
    return xp.sqrt(arr)
def forward_xp(self, inputs, xp):
    """Dot the two inputs, testing either xp.dot or the ndarray method."""
    a, b = inputs
    if self.is_module:
        y = xp.dot(a, b)
    else:
        y = a.dot(b)
    # Force the numpy reference onto the dtype chainerx is expected to emit.
    out = dtype_utils.cast_if_numpy_array(xp, y, self.chx_expected_dtype)
    return out,
def forward_xp(self, inputs, xp):
    """Concatenate the inputs, optionally along ``self.axis``."""
    if self.axis is _unspecified:
        joined = xp.concatenate(inputs)
    else:
        joined = xp.concatenate(inputs, self.axis)
    if self.chx_expected_dtype is not None:
        # Align the numpy result with chainerx's expected output dtype.
        joined = dtype_utils.cast_if_numpy_array(
            xp, joined, self.chx_expected_dtype)
    return joined,
def forward_xp(self, inputs, xp):
    """Concatenate inputs; the axis argument is omitted when unspecified."""
    args = (inputs,) if self.axis is _unspecified else (inputs, self.axis)
    b = xp.concatenate(*args)
    expected = self.chx_expected_dtype
    if expected is not None:
        # Force the numpy reference onto the dtype chainerx produces.
        b = dtype_utils.cast_if_numpy_array(xp, b, expected)
    return b,
def test_where_scalar_scalar(xp, cond_shape, cond_dtype, in_types, out_dtype):
    """xp.where with scalar branches and a deterministic random condition."""
    rs = numpy.random.RandomState(seed=0)
    cond = xp.array(_random_condition(cond_shape, cond_dtype, random_state=rs))
    x_type, y_type = in_types
    out = xp.where(cond, x_type(0), y_type(2))
    return dtype_utils.cast_if_numpy_array(xp, out, out_dtype)
def test_ones(xp, shape_as_tuple_or_int, dtype_spec, device):
    """xp.ones with and without an explicit dtype."""
    if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
        # numpy needs the dtype name rather than the chainerx object.
        dtype_spec = dtype_spec.name
    if dtype_spec is Unspecified:
        out = xp.ones(shape_as_tuple_or_int)
    else:
        out = xp.ones(shape_as_tuple_or_int, dtype_spec)
    if dtype_spec in (None, Unspecified):
        # Align numpy's float64 default with chainerx's float32 default.
        out = dtype_utils.cast_if_numpy_array(xp, out, 'float32')
    return out
def test_ones(xp, shape_as_tuple_or_int, dtype_spec, device):
    """Create a ones array, normalizing dtype handling across backends."""
    spec = dtype_spec
    if xp is numpy and isinstance(spec, chainerx.dtype):
        spec = spec.name
    if spec is Unspecified:
        result = xp.ones(shape_as_tuple_or_int)
    else:
        result = xp.ones(shape_as_tuple_or_int, spec)
    if spec is None or spec is Unspecified:
        # Default-dtype case: match chainerx's float32 on the numpy side.
        result = dtype_utils.cast_if_numpy_array(xp, result, 'float32')
    return result
def test_empty(xp, shape_as_tuple_or_int, dtype_spec, device):
    """xp.empty, zero-filled so the comparison is deterministic."""
    if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
        # numpy needs the dtype name rather than the chainerx object.
        dtype_spec = dtype_spec.name
    if dtype_spec is Unspecified:
        a = xp.empty(shape_as_tuple_or_int)
    else:
        a = xp.empty(shape_as_tuple_or_int, dtype_spec)
    # empty() contents are arbitrary; fill so values can be compared.
    a.fill(0)
    if dtype_spec in (None, Unspecified):
        # Align numpy's default dtype with chainerx's float32 default.
        a = dtype_utils.cast_if_numpy_array(xp, a, 'float32')
    return a
def test_empty(xp, shape_as_tuple_or_int, dtype_spec, device):
    """Allocate with xp.empty and zero it for a stable comparison."""
    spec = dtype_spec
    if xp is numpy and isinstance(spec, chainerx.dtype):
        spec = spec.name
    if spec is Unspecified:
        arr = xp.empty(shape_as_tuple_or_int)
    else:
        arr = xp.empty(shape_as_tuple_or_int, spec)
    # Uninitialized memory would make the test nondeterministic.
    arr.fill(0)
    if spec is None or spec is Unspecified:
        arr = dtype_utils.cast_if_numpy_array(xp, arr, 'float32')
    return arr
def apply_func(is_module, func, xp, device, input, axis, dtypes):
    """Cast input to in_dtype, apply func, and align the numpy output dtype."""
    (in_dtype,), out_dtype = dtypes
    try:
        casted = input.astype(in_dtype)
    except (ValueError, OverflowError):
        # Invalid combination of data and dtype; return a dummy result.
        return xp.zeros(())
    out = func(is_module, xp, xp.array(casted), axis)
    if xp is numpy:
        out = dtype_utils.cast_if_numpy_array(xp, out, out_dtype)
    return out
def forward_xp(self, inputs, xp):
    """Apply the in-place ``self.func`` and return the mutated first input."""
    a, b = inputs
    b = dtype_utils.cast_if_numpy_array(xp, b, a.dtype)
    if xp is chainerx:
        # Copy so the original input is untouched; stop gradients since the
        # operation is applied in place.
        target = a.as_grad_stopped().copy()
        other = b.as_grad_stopped()
    else:
        target = a.copy()
        other = b
    with IgnoreNumpyFloatingPointError():
        ret = self.func(xp, target, other)
    assert ret is None  # func should not return anything
    return target,
def test_arange_stop(xp, stop, dtype_spec, device):
    """xp.arange called with only a stop argument."""
    # TODO(hvy): xp.arange(True) should return an ndarray of type int64
    if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
        dtype_spec = dtype_spec.name
    # Checked in test_invalid_arange_too_long_bool
    if _is_bool_spec(dtype_spec) and stop > 2:
        return chainerx.testing.ignore()
    if isinstance(stop, bool) and dtype_spec is None:
        # TODO(niboshi): This pattern needs dtype promotion.
        return chainerx.testing.ignore()
    out = xp.arange(stop, dtype=dtype_spec)
    if dtype_spec in (None, Unspecified):
        # Align numpy's default dtype with chainerx's default for `stop`.
        out = dtype_utils.cast_if_numpy_array(
            xp, out, _get_default_dtype(stop))
    return out
def test_arange_stop(xp, stop, dtype_spec, device):
    """Exercise the single-argument form of xp.arange."""
    # TODO(hvy): xp.arange(True) should return an ndarray of type int64
    spec = dtype_spec
    if xp is numpy and isinstance(spec, chainerx.dtype):
        spec = spec.name
    # Checked in test_invalid_arange_too_long_bool
    if _is_bool_spec(spec) and stop > 2:
        return chainerx.testing.ignore()
    if isinstance(stop, bool) and spec is None:
        # TODO(niboshi): This pattern needs dtype promotion.
        return chainerx.testing.ignore()
    result = xp.arange(stop, dtype=spec)
    if spec is None or spec is Unspecified:
        expected_dtype = _get_default_dtype(stop)
        result = dtype_utils.cast_if_numpy_array(xp, result, expected_dtype)
    return result
def test_arange_start_stop_step(xp, start, stop, step, dtype_spec, device):
    """xp.arange with explicit start, stop and step arguments."""
    if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
        dtype_spec = dtype_spec.name
    # Checked in test_invalid_arange_too_long_bool
    if _is_bool_spec(dtype_spec) and abs((stop - start) / step) > 2:
        return chainerx.testing.ignore()
    if ((isinstance(start, bool)
            or isinstance(stop, bool)
            or isinstance(step, bool))
            and dtype_spec is None):
        # TODO(niboshi): This pattern needs dtype promotion.
        return chainerx.testing.ignore()
    out = xp.arange(start, stop, step, dtype=dtype_spec)
    if dtype_spec in (None, Unspecified):
        # Align numpy's default dtype with chainerx's default for `step`.
        out = dtype_utils.cast_if_numpy_array(
            xp, out, _get_default_dtype(step))
    return out
def test_arange_start_stop_step(xp, start, stop, step, dtype_spec, device):
    """Exercise the three-argument form of xp.arange."""
    spec = dtype_spec
    if xp is numpy and isinstance(spec, chainerx.dtype):
        spec = spec.name
    # Checked in test_invalid_arange_too_long_bool
    if _is_bool_spec(spec) and abs((stop - start) / step) > 2:
        return chainerx.testing.ignore()
    any_bool_arg = any(isinstance(v, bool) for v in (start, stop, step))
    if any_bool_arg and spec is None:
        # TODO(niboshi): This pattern needs dtype promotion.
        return chainerx.testing.ignore()
    result = xp.arange(start, stop, step, dtype=spec)
    if spec is None or spec is Unspecified:
        expected_dtype = _get_default_dtype(step)
        result = dtype_utils.cast_if_numpy_array(xp, result, expected_dtype)
    return result
def test_dot(is_module, xp, device, a_shape, b_shape, dtypes,
             chx_expected_dtype):
    """Dot product across dtype combinations and both call spellings."""
    # TODO(beam2d): Remove the skip after supporting non-float dot on CUDA
    if (device.name == 'cuda:0'
            and any(numpy.dtype(d).kind != 'f' for d in dtypes)):
        pytest.skip('CUDA dot only supports floating kind dtypes.')
    a_dtype, b_dtype = dtypes
    lhs = array_utils.create_dummy_ndarray(xp, a_shape, a_dtype)
    rhs = array_utils.create_dummy_ndarray(xp, b_shape, b_dtype)
    result = xp.dot(lhs, rhs) if is_module else lhs.dot(rhs)
    return dtype_utils.cast_if_numpy_array(xp, result, chx_expected_dtype)
def test_rounding_routines(func, xp, device, input, dtypes):
    """Apply a rounding routine and align the numpy output dtype."""
    (in_dtype,), out_dtype = dtypes
    rounded = func(xp, xp.array(input.astype(in_dtype)))
    return dtype_utils.cast_if_numpy_array(xp, rounded, out_dtype)
def test_log(xp, device, input, in_dtype, out_dtype):
    """xp.log on an input cast to the expected output dtype."""
    arr = xp.array(input.astype(in_dtype))
    arr = dtype_utils.cast_if_numpy_array(xp, arr, out_dtype)
    return xp.log(arr)
def test_concatenate_three_arrays_mixed_dtypes(
        xp, shapes, axis, dtypes, chx_expected_dtype):
    """Concatenate three arrays of mixed dtypes along the given axis."""
    assert len(shapes) == 3
    joined = _concatenate(xp, shapes, dtypes, axis)
    # Align the numpy result with the dtype chainerx is expected to emit.
    return dtype_utils.cast_if_numpy_array(xp, joined, chx_expected_dtype)
def test_full(xp, shape_as_tuple_or_int, value, device):
    """xp.full without a dtype; expects the fill value's default dtype."""
    filled = xp.full(shape_as_tuple_or_int, value)
    expected = _get_default_dtype(value)
    return dtype_utils.cast_if_numpy_array(xp, filled, expected)
def forward_xp(self, inputs, xp):
    """tanh of the single input, cast to the chainerx dtype first."""
    x, = inputs
    casted = dtype_utils.cast_if_numpy_array(xp, x, self.chx_dtype)
    return xp.tanh(casted),
def test_full(xp, shape_as_tuple_or_int, value, device):
    """Create a filled array and normalize its dtype to the value default."""
    out = xp.full(shape_as_tuple_or_int, value)
    return dtype_utils.cast_if_numpy_array(
        xp, out, _get_default_dtype(value))
def forward_xp(self, inputs, xp):
    """Select between the two inputs using the stored condition array."""
    x, y = inputs
    chosen = xp.where(xp.array(self.condition), x, y)
    # Align the numpy result dtype with the chainerx output dtype.
    chosen = dtype_utils.cast_if_numpy_array(xp, chosen, self.out_dtype)
    return chosen,
def forward_xp(self, inputs, xp):
    """Join the inputs via ``self.join``, optionally pinning the dtype."""
    joined = self.join(inputs, xp)
    expected = self.chx_expected_dtype
    if expected is not None:
        joined = dtype_utils.cast_if_numpy_array(xp, joined, expected)
    return joined,
def forward_xp(self, inputs, xp):
    """Compute tanh after normalizing the input to the chainerx dtype."""
    x, = inputs
    x = dtype_utils.cast_if_numpy_array(xp, x, self.chx_dtype)
    y = xp.tanh(x)
    return y,
def func(self, xp, a):
    """Return cos(a); for numpy, cast to the expected promoted dtype first."""
    if xp is numpy:
        # Look up the dtype chainerx would produce for this input dtype
        # (may be absent from the table, yielding None).
        expected = dict(_expected_dtypes_math_functions).get(self.dtype)
        a = dtype_utils.cast_if_numpy_array(xp, a, expected)
    return xp.cos(a)
def test_log(xp, device, input, in_dtype, out_dtype):
    """Natural log, with the input pre-cast to the expected output dtype."""
    data = input.astype(in_dtype)
    a = dtype_utils.cast_if_numpy_array(xp, xp.array(data), out_dtype)
    return xp.log(a)
def forward_xp(self, inputs, xp):
    """Apply ``self.func``, casting the input first when out_dtype is set."""
    a, = inputs
    if self.out_dtype is not None:
        a = dtype_utils.cast_if_numpy_array(xp, a, self.out_dtype)
    return self.func(xp, a),