def test_disk(self):
    """regression test of img-1194, footprint = [1]

    Test peak.peak_local_max when every point is a local maximum
    """
    image = cp.asarray(np.random.uniform(size=(10, 20)))
    footprint = cp.asarray([[1]])
    with expected_warnings(["indices argument is deprecated"]):
        result = peak.peak_local_max(
            image,
            labels=cp.ones((10, 20), dtype=int),
            footprint=footprint,
            min_distance=1,
            threshold_rel=0,
            threshold_abs=-1,
            indices=False,
            exclude_border=False,
        )
        assert cp.all(result)
        result = peak.peak_local_max(
            image,
            footprint=footprint,
            threshold_abs=-1,
            indices=False,
            exclude_border=False,
        )
        assert cp.all(result)
def test_structural_similarity_grad(seed):
    N = 30
    # NOTE: This test is known to randomly fail on some systems (Mac OS X 10.6)
    #       and when running tests in parallel. Therefore, we choose a few
    #       seeds that are known to work.
    #       The likely cause of this failure is that we are setting a hard
    #       threshold on the value of the gradient. Often the computed gradient
    #       is only slightly larger than what was measured.
    # X = cp.random.rand(N, N) * 255
    # Y = cp.random.rand(N, N) * 255
    rnd = np.random.RandomState(seed)
    X = cp.asarray(rnd.rand(N, N) * 255)
    Y = cp.asarray(rnd.rand(N, N) * 255)

    f = structural_similarity(X, Y, data_range=255)
    g = structural_similarity(X, Y, data_range=255, gradient=True)

    assert f < 0.05
    assert g[0] < 0.05
    assert cp.all(g[1] < 0.05)

    mssim, grad, s = structural_similarity(
        X, Y, data_range=255, gradient=True, full=True
    )
    assert cp.all(grad < 0.05)
def spherical_cosmask(n, mask_radius, edge_width, origin=None):
    """mask = spherical_cosmask(n, mask_radius, edge_width, origin)

    Build a spherical mask of size ``n`` whose edge falls off smoothly
    (raised cosine) over ``edge_width`` voxels.
    """
    if type(n) is int:
        n = np.array([n])

    sz = np.array([1, 1, 1])
    sz[0:np.size(n)] = n[:]

    szl = -np.floor(sz / 2)
    szh = szl + sz

    x, y, z = np.meshgrid(
        np.arange(szl[0], szh[0]),
        np.arange(szl[1], szh[1]),
        np.arange(szl[2], szh[2]),
        indexing='ij', sparse=True)

    r = np.sqrt(x * x + y * y + z * z)

    m = np.zeros(sz.tolist())

    # edgezone = np.where((x*x + y*y + z*z >= mask_radius) &
    #                     (x*x + y*y + z*z <= np.square(mask_radius + edge_width)))
    edgezone = np.all(
        [(x * x + y * y + z * z >= mask_radius),
         (x * x + y * y + z * z <= np.square(mask_radius + edge_width))],
        axis=0)
    m[edgezone] = 0.5 + 0.5 * np.cos(
        2 * np.pi * (r[edgezone] - mask_radius) / (2 * edge_width))
    m[np.all([(x * x + y * y + z * z <= mask_radius * mask_radius)], axis=0)] = 1
    # m[np.where(x*x + y*y + z*z <= mask_radius*mask_radius)] = 1

    return m
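# A small usage sketch for spherical_cosmask (the values below are chosen for
# illustration; they are not from the original source): build a 32^3 volume
# containing a radius-10 sphere whose edge tapers to zero over 3 voxels.
def example_spherical_cosmask():
    mask = spherical_cosmask(np.array([32, 32, 32]), mask_radius=10, edge_width=3)
    assert mask.shape == (32, 32, 32)
    assert mask.min() >= 0.0 and mask.max() == 1.0  # raised-cosine edge stays in [0, 1]
    return mask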
def test_adjacent_and_different(self):
    image = cp.zeros((10, 20))
    labels = cp.zeros((10, 20), int)
    image[5, 5] = 1
    image[5, 6] = 0.5
    labels[5, 5:6] = 1
    expected = image == 1
    with expected_warnings(["indices argument is deprecated"]):
        result = peak.peak_local_max(
            image,
            labels=labels,
            footprint=cp.ones((3, 3), bool),
            min_distance=1,
            threshold_rel=0,
            indices=False,
            exclude_border=False,
        )
        assert cp.all(result == expected)
        result = peak.peak_local_max(
            image,
            labels=labels,
            min_distance=1,
            threshold_rel=0,
            indices=False,
            exclude_border=False,
        )
        assert cp.all(result == expected)
def test_dropout_forward(self):
    _, y = self.states.forward(cupy.cudnn.get_handle(), self.x, self.ratio)
    if self.ratio == 0:
        self.assertTrue(cupy.all(self.x == y))
    else:
        self.assertTrue(cupy.all(self.x != y))
def test_sobel_h_horizontal():
    """Horizontal Sobel on an edge should be a horizontal line."""
    i, j = cp.mgrid[-5:6, -5:6]
    image = (i >= 0).astype(float)
    result = filters.sobel_h(image)
    # Check that the result matches the transform direction
    assert cp.all(result[i == 0] == 1)
    assert cp.all(result[cp.abs(i) > 1] == 0)
def test_scharr_v_vertical():
    """Vertical Scharr on an edge should be a vertical line."""
    i, j = cp.mgrid[-5:6, -5:6]
    image = (j >= 0).astype(float)
    result = filters.scharr_v(image)
    # Check that the result matches the transform direction
    assert cp.all(result[j == 0] == 1)
    assert cp.all(result[cp.abs(j) > 1] == 0)
def test_arraymap_update():
    in_values = cp.unique(cp.random.randint(0, 200, size=5))
    out_values = cp.random.random(len(in_values))
    m = ArrayMap(in_values, out_values)
    image = cp.random.randint(1, len(m), size=(512, 512))
    assert cp.all(m[image] < 1)  # missing values map to 0.
    m[1:] += 1
    assert cp.all(m[image] >= 1)
def predict_df(x):
    # column major array
    inplace_predt = booster.inplace_predict(x.values)
    d = xgb.DMatrix(x)
    copied_predt = cp.array(booster.predict(d))
    assert cp.all(copied_predt == inplace_predt)

    inplace_predt = booster.inplace_predict(x)
    return cp.all(copied_predt == inplace_predt)
def test_arraymap_bool_index():
    in_values = cp.unique(cp.random.randint(0, 200, size=5))
    out_values = cp.random.random(len(in_values))
    m = ArrayMap(in_values, out_values)
    image = cp.random.randint(1, len(in_values), size=(512, 512))
    assert cp.all(m[image] < 1)  # missing values map to 0.
    positive = cp.ones(len(m), dtype=bool)
    positive[0] = False
    m[positive] += 1
    assert cp.all(m[image] >= 1)
def test_read_write(self, tmpdir):
    cd = self.init_cuda_dict()
    filename = os.path.join(tmpdir, 'test_cd.npz')
    CudaDict.save(filename, cd)
    new_cd = CudaDict.load(filename, tpb=cd.tpb)
    assert len(new_cd) == len(cd)
    assert cp.all(cd.contains(new_cd.keys()))
    assert cp.all(new_cd.contains(cd.keys()))
    assert cp.all(cd[new_cd.keys()] == new_cd.values())
    assert cp.all(new_cd[cd.keys()] == cd.values())
def test_output(output_type, dtype, order, shape):
    inp = create_input('numpy', dtype, shape, order)
    ary = CumlArray(inp)

    if dtype in unsupported_cudf_dtypes and \
            output_type in ['series', 'dataframe', 'cudf']:
        with pytest.raises(ValueError):
            res = ary.to_output(output_type)
    elif shape in [(10, 5), (1, 10)] and output_type == 'series':
        with pytest.raises(ValueError):
            res = ary.to_output(output_type)
    else:
        res = ary.to_output(output_type)

        # using correct numba ndarray check
        if output_type == 'numba':
            assert cuda.devicearray.is_cuda_ndarray(res)
        elif output_type == 'cudf':
            if shape in [(10, 5), (1, 10)]:
                assert isinstance(res, cudf.DataFrame)
            else:
                assert isinstance(res, cudf.Series)
        else:
            assert isinstance(res, test_output_types[output_type])

        if output_type == 'numpy':
            assert np.all(inp == ary.to_output('numpy'))
        elif output_type == 'cupy':
            assert cp.all(cp.asarray(inp) == ary.to_output('cupy'))
        elif output_type == 'numba':
            assert cp.all(cp.asarray(cuda.to_device(inp)) == cp.asarray(res))
        elif output_type == 'series':
            comp = cudf.Series(np.ravel(inp)) == res
            assert np.all(comp.to_array())
        elif output_type == 'dataframe':
            mat = cuda.to_device(inp)
            if len(mat.shape) == 1:
                mat = mat.reshape(mat.shape[0], 1)
            comp = cudf.DataFrame.from_gpu_matrix(mat)
            comp = comp == res
            assert np.all(comp.as_gpu_matrix().copy_to_host())

        # check for e2e cartesian product:
        if output_type not in ['dataframe', 'cudf']:
            res2 = CumlArray(res)
            res2 = res2.to_output('numpy')
            if output_type == 'series' and shape == (10, 1):
                assert np.all(inp.reshape((1, 10)) == res2)
            else:
                assert np.all(inp == res2)
def test_init(self):
    cd = self.init_cuda_dict()

    cd.bpg = ceil(len(self.test_keys) / cd.tpb)
    assert cp.all(cd.contains(self.test_keys))
    assert cp.all(cd[self.test_keys] == self.test_values)

    cd.bpg = ceil(len(self.test_lookup_keys_avail) / cd.tpb)
    assert cp.all(cd.contains(self.test_lookup_keys_avail))
    assert cp.all(cd[self.test_lookup_keys_avail] ==
                  self.test_lookup_values_avail)

    cd.bpg = ceil(len(self.test_lookup_keys_unavail) / cd.tpb)
    assert cp.all(cd[self.test_lookup_keys_unavail] == self.test_default[0])
    assert not cp.any(cd.contains(self.test_lookup_keys_unavail))
def test_generate_random_first_generation_returns_individuals_with_values_that_are_either_0_or_1():
    # Given
    number_of_individuals = 100
    number_of_literals = 40

    # When
    population = generate_random_first_generation(number_of_individuals,
                                                  number_of_literals)

    # Then
    assert cupy.all(population <= 1)
    assert cupy.all(population >= 0)
def test_dropout_seed(self):
    # initialize DropoutStates with the same seed
    states2 = cudnn.DropoutStates(None, self.seed)

    rspace, y = self.states.forward(None, self.x, self.ratio)
    rspace2, y2 = states2.forward(None, self.x, self.ratio)
    # forward results must be the same
    self.assertTrue(cupy.all(y == y2))

    gx = self.states.backward(None, self.gy, self.ratio, rspace)
    gx2 = states2.backward(None, self.gy, self.ratio, rspace2)
    # backward results must be the same
    self.assertTrue(cupy.all(gx == gx2))
def test_device_gpu(self):
    import cupy as cp

    mv = MedicalVolume(np.ones((10, 20, 30)), self._AFFINE)
    mv_gpu = mv.to(Device(0))

    assert mv_gpu.device == Device(0)
    assert isinstance(mv_gpu.volume, cp.ndarray)
    assert isinstance(mv_gpu.affine, np.ndarray)
    assert mv_gpu.is_same_dimensions(mv)

    assert cp.all((mv_gpu + 1).volume == 2)
    assert cp.all((mv_gpu - 1).volume == 0)
    assert cp.all((mv_gpu * 2).volume == 2)
    assert cp.all((mv_gpu / 2).volume == 0.5)
    assert cp.all((mv_gpu > 0).volume)
    assert cp.all((mv_gpu >= 0).volume)
    assert cp.all((mv_gpu < 2).volume)
    assert cp.all((mv_gpu <= 2).volume)

    ornt = tuple(x[::-1] for x in mv_gpu.orientation[::-1])
    mv2 = mv_gpu.reformat(ornt)
    assert mv2.orientation == ornt

    mv_cpu = mv_gpu.cpu()
    assert mv_cpu.device == Device(-1)
    assert mv_cpu.is_identical(mv)

    with self.assertRaises(RuntimeError):
        mv_gpu.save_volume(os.path.join(self._TEMP_PATH, "test_device.nii.gz"))
def fit_custom(X, n_clusters, max_iter):
    assert X.ndim == 2

    n_samples = len(X)
    pred = cupy.zeros(n_samples)

    initial_indexes = cupy.random.choice(n_samples, n_clusters, replace=False)
    centers = X[initial_indexes]

    for _ in range(max_iter):
        distances = var_kernel(X[:, None, 0], X[:, None, 1],
                               centers[None, :, 1], centers[None, :, 0])
        new_pred = cupy.argmin(distances, axis=1)
        if cupy.all(new_pred == pred):
            break
        pred = new_pred

        i = cupy.arange(n_clusters)
        mask = pred == i[:, None]
        sums = sum_kernel(X, mask[:, :, None], axis=1)
        counts = count_kernel(mask, axis=1).reshape((n_clusters, 1))
        centers = sums / counts

    return centers, pred
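# fit_custom above assumes three CuPy kernels (var_kernel, sum_kernel,
# count_kernel) defined elsewhere. A minimal sketch of compatible definitions,
# built with cupy.ElementwiseKernel / cupy.ReductionKernel in the style of
# CuPy's k-means example (treat these as an assumption, not the original code):
var_kernel = cupy.ElementwiseKernel(
    'T x0, T x1, T c0, T c1', 'T out',
    'out = (x0 - c0) * (x0 - c0) + (x1 - c1) * (x1 - c1)',
    'var_kernel'
)
sum_kernel = cupy.ReductionKernel(
    'T x, S mask', 'T out',
    'mask ? x : 0',       # map: keep only samples assigned to the cluster
    'a + b', 'out = a',   # reduce: per-cluster coordinate sums
    '0',
    'sum_kernel'
)
count_kernel = cupy.ReductionKernel(
    'T mask', 'float32 out',
    'mask ? 1.0 : 0.0',   # map: 1 for each assigned sample
    'a + b', 'out = a',   # reduce: per-cluster member counts
    '0.0',
    'count_kernel'
)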
def test_no_motion_3d():
    rnd = np.random.RandomState(0)
    img = cp.array(rnd.normal(size=(64, 64, 64)))

    flow = optical_flow_tvl1(img, img)

    assert cp.all(flow == 0)
def test_fit():
    ld = Loda(n_random_cuts=10, n_bins=None)
    x = cupy.random.randint(0, 100, size=(200, 10))
    ld.fit(x)
    assert ld._histograms is not None
    assert isinstance(ld._histograms, cupy.ndarray)
    assert cupy.all(ld._histograms > 0)
def test_deepcopy(input_type):
    if input_type == 'series':
        inp = create_input(input_type, np.float32, (10, 1), 'C')
    else:
        inp = create_input(input_type, np.float32, (10, 5), 'F')

    ary = CumlArray(data=inp)
    b = deepcopy(ary)

    if input_type == 'numpy':
        assert np.all(inp == b.to_output('numpy'))
    elif input_type == 'series':
        assert np.all(inp == b.to_output('series'))
    else:
        assert cp.all(inp == cp.asarray(b))

    assert ary.ptr != b.ptr

    assert ary.__cuda_array_interface__['shape'] == \
        b.__cuda_array_interface__['shape']
    assert ary.__cuda_array_interface__['strides'] == \
        b.__cuda_array_interface__['strides']
    assert ary.__cuda_array_interface__['typestr'] == \
        b.__cuda_array_interface__['typestr']

    if input_type != 'series':
        # skipping one dimensional ary order test
        assert ary.order == b.order
def test_serialize(input_type):
    if input_type == 'series':
        inp = create_input(input_type, np.float32, (10, 1), 'C')
    else:
        inp = create_input(input_type, np.float32, (10, 5), 'F')

    ary = CumlArray(data=inp)
    header, frames = ary.serialize()
    ary2 = CumlArray.deserialize(header, frames)

    assert pickle.loads(header['type-serialized']) is CumlArray
    assert all(isinstance(f, Buffer) for f in frames)

    if input_type == 'numpy':
        assert np.all(inp == ary2.to_output('numpy'))
    elif input_type == 'series':
        assert np.all(inp == ary2.to_output('series'))
    else:
        assert cp.all(inp == cp.asarray(ary2))

    assert ary.__cuda_array_interface__['shape'] == \
        ary2.__cuda_array_interface__['shape']
    assert ary.__cuda_array_interface__['strides'] == \
        ary2.__cuda_array_interface__['strides']
    assert ary.__cuda_array_interface__['typestr'] == \
        ary2.__cuda_array_interface__['typestr']

    if input_type != 'series':
        # skipping one dimensional ary order test
        assert ary.order == ary2.order
def test_array_init_bad(input_type, dtype, shape, order):
    """
    This test ensures that we assert on incorrect combinations of arguments
    when creating CumlArray
    """
    if input_type == 'series':
        if dtype == np.float16:
            pytest.skip("Skipping due to cuDF issue #9065")
        inp = create_input(input_type, dtype, shape, 'C')
    else:
        inp = create_input(input_type, dtype, shape, order)

    # Ensure the array is creatable
    cuml_ary = CumlArray(inp)

    with pytest.raises(AssertionError):
        CumlArray(inp, dtype=cuml_ary.dtype)

    with pytest.raises(AssertionError):
        CumlArray(inp, shape=cuml_ary.shape)

    with pytest.raises(AssertionError):
        CumlArray(inp, order=_strides_to_order(cuml_ary.strides,
                                               cuml_ary.dtype))

    assert cp.all(cp.asarray(inp) == cp.asarray(cuml_ary))
def test_sobel_vertical():
    """Sobel on a vertical edge should be a vertical line."""
    i, j = cp.mgrid[-5:6, -5:6]
    image = (j >= 0).astype(float)
    result = filters.sobel(image) * np.sqrt(2)
    assert_allclose(result[j == 0], 1)
    assert cp.all(result[cp.abs(j) > 1] == 0)
def test_no_motion_3d():
    rnd = cp.random.RandomState(0)
    img = rnd.normal(size=(128, 128, 128))

    flow = optical_flow_tvl1(img, img)

    assert cp.all(flow == 0)
def test_01_01_circle(self):
    """Test that the Canny filter finds the outlines of a circle"""
    i, j = cp.mgrid[-200:200, -200:200].astype(float) / 200
    c = cp.abs(cp.sqrt(i * i + j * j) - 0.5) < 0.02
    result = feature.canny(c.astype(float), 4, 0, 0, cp.ones(c.shape, bool))
    #
    # erode and dilate the circle to get rings that should contain the
    # outlines
    #
    # TODO: grlee77: only implemented brute_force=True, so added that to
    #       these tests
    cd = binary_dilation(c, iterations=3, brute_force=True)
    ce = binary_erosion(c, iterations=3, brute_force=True)
    cde = cp.logical_and(cd, cp.logical_not(ce))
    self.assertTrue(cp.all(cde[result]))
    #
    # The circle has a radius of 100. There are two rings here, one
    # for the inside edge and one for the outside. So that's
    # 100 * 2 * 2 * 3 for those places where pi is still 3.
    # The edge contains both pixels if there's a tie, so we
    # bump the count a little.
    #
    point_count = cp.sum(result)
    self.assertTrue(point_count > 1200)
    self.assertTrue(point_count < 1600)
def test_downsize_anti_aliasing():
    x = cp.zeros((10, 10), dtype=np.double)
    x[2, 2] = 1
    scaled = resize(x, (5, 5), order=1, anti_aliasing=True, mode="constant")
    assert_array_equal(scaled.shape, (5, 5))
    assert cp.all(scaled[:3, :3] > 0)
    assert_array_equal(scaled[3:, :].sum(), 0)
    assert_array_equal(scaled[:, 3:].sum(), 0)

    sigma = 0.125
    out_size = (5, 5)
    resize(x, out_size, order=1, mode="constant",
           anti_aliasing=True, anti_aliasing_sigma=sigma)
    resize(x, out_size, order=1, mode="edge",
           anti_aliasing=True, anti_aliasing_sigma=sigma)
    resize(x, out_size, order=1, mode="symmetric",
           anti_aliasing=True, anti_aliasing_sigma=sigma)
    resize(x, out_size, order=1, mode="reflect",
           anti_aliasing=True, anti_aliasing_sigma=sigma)
    resize(x, out_size, order=1, mode="wrap",
           anti_aliasing=True, anti_aliasing_sigma=sigma)

    with pytest.raises(ValueError):  # Unknown mode, or cannot translate mode
        resize(x, out_size, order=1, mode="non-existent",
               anti_aliasing=True, anti_aliasing_sigma=sigma)
def test_make_regression(n_samples, n_features, n_informative, n_targets,
                         bias, effective_rank, tail_strength, noise, shuffle,
                         coef, random_state, n_parts, cluster):
    c = Client(cluster)
    try:
        from cuml.dask.datasets import make_regression

        result = make_regression(n_samples=n_samples, n_features=n_features,
                                 n_informative=n_informative,
                                 n_targets=n_targets, bias=bias,
                                 effective_rank=effective_rank, noise=noise,
                                 shuffle=shuffle, coef=coef,
                                 random_state=random_state, n_parts=n_parts)

        if coef:
            out, values, coefs = result
        else:
            out, values = result

        assert out.shape == (n_samples, n_features), "out shape mismatch"

        if n_targets > 1:
            assert values.shape == (n_samples, n_targets), \
                "values shape mismatch"
        else:
            assert values.shape == (n_samples,), "values shape mismatch"

        assert len(out.chunks[0]) == n_parts
        assert len(out.chunks[1]) == 1

        if coef:
            if n_targets > 1:
                assert coefs.shape == (n_features, n_targets), \
                    "coefs shape mismatch"
                assert len(coefs.chunks[1]) == 1
            else:
                assert coefs.shape == (n_features,), "coefs shape mismatch"
                assert len(coefs.chunks[0]) == 1

            test1 = da.all(da.sum(coefs != 0.0, axis=0) == n_informative)
            std_test2 = da.std(values - (da.dot(out, coefs) + bias), axis=0)

            test1, std_test2 = da.compute(test1, std_test2)

            diff = cp.abs(1.0 - std_test2)
            test2 = cp.all(diff < 1.5 * 10**(-1.))

            assert test1, "Unexpected number of informative features"
            assert test2, "Unexpectedly incongruent outputs"
    finally:
        c.close()
def test_farid_vertical():
    """Farid on a vertical edge should be a vertical line."""
    i, j = cp.mgrid[-5:6, -5:6]
    image = (j >= 0).astype(float)
    result = filters.farid(image) * np.sqrt(2)
    assert cp.all(result[j == 0] == result[j == 0][0])
    assert_allclose(result[cp.abs(j) > 2], 0, atol=1e-10)
def remove_duplicate(params, grads):
    """Consolidate duplicated weights in the parameter list into a single
    entry and accumulate the corresponding gradients.
    """
    params, grads = params[:], grads[:]  # copy lists

    while True:
        find_flg = False
        L = len(params)

        for i in range(0, L - 1):
            for j in range(i + 1, L):
                # the two entries share the same weight object
                if params[i] is params[j]:
                    grads[i] += grads[j]  # accumulate the gradients
                    find_flg = True
                    params.pop(j)
                    grads.pop(j)
                # the weight is shared as a transposed matrix (weight tying)
                elif params[i].ndim == 2 and params[j].ndim == 2 and \
                        params[i].T.shape == params[j].shape and \
                        cp.all(params[i].T == params[j]):
                    grads[i] += grads[j].T
                    find_flg = True
                    params.pop(j)
                    grads.pop(j)

                if find_flg:
                    break
            if find_flg:
                break

        if not find_flg:
            break

    return params, grads
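# Illustration of remove_duplicate (hypothetical arrays, not from the original
# source): two entries point at the same weight object, so they collapse to a
# single entry and their gradients are accumulated.
def example_remove_duplicate():
    W = cp.ones((3, 4))
    params = [W, W]  # shared weight
    grads = [cp.full((3, 4), 0.5), cp.full((3, 4), 1.5)]
    params, grads = remove_duplicate(params, grads)
    assert len(params) == 1 and len(grads) == 1
    assert cp.all(grads[0] == 2.0)  # 0.5 + 1.5 merged into one gradient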
def check_finite(array, force_all_finite=True):
    """Checks that the input is finite if necessary

    Parameters
    ----------
    array : object
        Input object to check / convert.
    force_all_finite : boolean or 'allow-nan', (default=True)
        Whether to raise an error on np.inf, np.nan, pd.NA in array. The
        possibilities are:

        - True: Force all values of array to be finite.
        - False: accepts np.inf, np.nan, pd.NA in array.
        - 'allow-nan': accepts only np.nan and pd.NA values in array.
          Values cannot be infinite.

        ``force_all_finite`` accepts the string ``'allow-nan'``.

    Returns
    -------
    None or raise error
    """
    if force_all_finite is True:
        if not cp.all(cp.isfinite(array)):
            raise ValueError("Non-finite value encountered in array")
    elif force_all_finite == 'allow-nan':
        if cp.any(cp.isinf(array)):
            raise ValueError("Non-finite value encountered in array")
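# A few illustrative calls for check_finite (example inputs only):
def example_check_finite():
    check_finite(cp.array([0.0, 1.0, 2.0]))  # all finite: returns None
    check_finite(cp.array([0.0, float('nan')]),
                 force_all_finite='allow-nan')  # NaN tolerated, inf is not
    try:
        check_finite(cp.array([0.0, float('inf')]))
    except ValueError as err:
        print(err)  # "Non-finite value encountered in array"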