def get_dH2(lab1, lab2):
    """squared hue difference term occurring in deltaE_cmc and deltaE_ciede94

    Despite its name, "dH" is not a simple difference of hue values.  We
    avoid working directly with the hue value, since differencing angles is
    troublesome.  The hue term is usually written as:
        c1 = sqrt(a1**2 + b1**2)
        c2 = sqrt(a2**2 + b2**2)
        term = (a1-a2)**2 + (b1-b2)**2 - (c1-c2)**2
        dH = sqrt(term)

    However, this has poor roundoff properties when a or b is dominant.
    Instead, ab is a vector with elements a and b.  The same dH term can be
    re-written as:
        |ab1-ab2|**2 - (|ab1| - |ab2|)**2
    and then simplified to:
        2*|ab1|*|ab2| - 2*dot(ab1, ab2)
    """
    a1, b1 = cp.rollaxis(lab1, -1)[1:3]
    a2, b2 = cp.rollaxis(lab2, -1)[1:3]

    # magnitude of (a, b) is the chroma
    C1 = cp.hypot(a1, b1)
    C2 = cp.hypot(a2, b2)

    term = (C1 * C2) - (a1 * a2 + b1 * b2)
    return 2 * term
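def example_get_dH2_identity():
    # Illustrative sketch, not part of the original sources: numerically
    # checks the identity quoted in the get_dH2 docstring for an arbitrary
    # pair of Lab colors.
    import cupy as cp

    lab1 = cp.asarray([50.0, 20.0, -30.0])
    lab2 = cp.asarray([55.0, -15.0, 10.0])
    a1, b1 = lab1[1], lab1[2]
    a2, b2 = lab2[1], lab2[2]
    C1 = cp.hypot(a1, b1)
    C2 = cp.hypot(a2, b2)

    # textbook form vs. the roundoff-friendly form used by get_dH2
    naive = (a1 - a2) ** 2 + (b1 - b2) ** 2 - (C1 - C2) ** 2
    stable = 2 * (C1 * C2 - (a1 * a2 + b1 * b2))
    assert bool(cp.allclose(naive, stable))
    assert bool(cp.allclose(stable, get_dH2(lab1, lab2)))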
def dot(a, b, out=None):
    """Returns a dot product of two arrays.

    For arrays with more than one axis, it computes the dot product along the
    last axis of ``a`` and the second-to-last axis of ``b``. This is just a
    matrix product if both arrays are 2-D. For 1-D arrays, it uses their
    unique axis as an axis to take the dot product over.

    Args:
        a (cupy.ndarray): The left argument.
        b (cupy.ndarray): The right argument.
        out (cupy.ndarray): Output array.

    Returns:
        cupy.ndarray: The dot product of ``a`` and ``b``.

    .. seealso:: :func:`numpy.dot`

    """
    a_ndim = a.ndim
    b_ndim = b.ndim
    assert a_ndim > 0 and b_ndim > 0
    a_is_vec = a_ndim == 1
    b_is_vec = b_ndim == 1

    if a_is_vec:
        a = cupy.reshape(a, (1, a.size))
        a_ndim = 2
    if b_is_vec:
        b = cupy.reshape(b, (b.size, 1))
        b_ndim = 2

    a_axis = a_ndim - 1
    b_axis = b_ndim - 2

    if a.shape[a_axis] != b.shape[b_axis]:
        raise ValueError('Axis dimension mismatch')

    if a_axis:
        a = cupy.rollaxis(a, a_axis, 0)
    if b_axis:
        b = cupy.rollaxis(b, b_axis, 0)

    k = a.shape[0]
    m = b.size // k
    n = a.size // k

    ret_shape = a.shape[1:] + b.shape[1:]
    if out is None:
        if a_is_vec:
            ret_shape = () if b_is_vec else ret_shape[1:]
        elif b_is_vec:
            ret_shape = ret_shape[:-1]
    else:
        if out.size != n * m:
            raise ValueError('Output array has an invalid size')
        if not out.flags.c_contiguous:
            raise ValueError('Output array must be C-contiguous')

    return _tensordot_core(a, b, out, n, m, k, ret_shape)
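def example_dot_usage():
    # Illustrative sketch (not from the original sources) of the public
    # cupy.dot API that the function above backs: for 2-D inputs the result
    # is an ordinary matrix product.
    import cupy as cp

    A = cp.arange(6, dtype=cp.float32).reshape(2, 3)
    B = cp.arange(12, dtype=cp.float32).reshape(3, 4)
    C = cp.dot(A, B)  # shape (2, 4)
    assert bool(cp.allclose(C, A @ B))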
def deltaE_cie76(lab1, lab2):
    """Euclidean distance between two points in Lab color space

    Parameters
    ----------
    lab1 : array_like
        reference color (Lab colorspace)
    lab2 : array_like
        comparison color (Lab colorspace)

    Returns
    -------
    dE : array_like
        distance between colors `lab1` and `lab2`

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Color_difference
    .. [2] A. R. Robertson, "The CIE 1976 color-difference formulae,"
           Color Res. Appl. 2, 7-11 (1977).
    """
    L1, a1, b1 = cp.rollaxis(lab1, -1)[:3]
    L2, a2, b2 = cp.rollaxis(lab2, -1)[:3]
    out = (L2 - L1) * (L2 - L1)
    out += (a2 - a1) * (a2 - a1)
    out += (b2 - b1) * (b2 - b1)
    return cp.sqrt(out, out=out)
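def example_deltaE_cie76_usage():
    # Illustrative sketch (arbitrary sample colors): CIE76 is simply the
    # Euclidean distance over the L, a, b channels.
    import cupy as cp

    lab1 = cp.asarray([50.0, 2.5, -10.0])
    lab2 = cp.asarray([52.0, -1.0, 4.0])
    dE = deltaE_cie76(lab1, lab2)
    assert bool(cp.allclose(dE, cp.linalg.norm(lab1 - lab2)))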
def moments_central(image, center=None, order=3, **kwargs):
    """Calculate all central image moments up to a certain order.

    The center coordinates (cr, cc) can be calculated from the raw moments as:
    {``M[1, 0] / M[0, 0]``, ``M[0, 1] / M[0, 0]``}.

    Note that central moments are translation invariant but not scale and
    rotation invariant.

    Parameters
    ----------
    image : nD double or uint8 array
        Rasterized shape as image.
    center : tuple of float, optional
        Coordinates of the image centroid. This will be computed if it is not
        provided.
    order : int, optional
        The maximum order of moments computed.

    Returns
    -------
    mu : (``order + 1``, ``order + 1``) array
        Central image moments.

    References
    ----------
    .. [1] Wilhelm Burger, Mark Burge. Principles of Digital Image Processing:
           Core Algorithms. Springer-Verlag, London, 2009.
    .. [2] B. Jähne. Digital Image Processing. Springer-Verlag,
           Berlin-Heidelberg, 6. edition, 2005.
    .. [3] T. H. Reiss. Recognizing Planar Objects Using Invariant Image
           Features, from Lecture notes in computer science, p. 676. Springer,
           Berlin, 1993.
    .. [4] https://en.wikipedia.org/wiki/Image_moment

    Examples
    --------
    >>> image = cp.zeros((20, 20), dtype=cp.double)
    >>> image[13:17, 13:17] = 1
    >>> M = moments(image)
    >>> centroid = (M[1, 0] / M[0, 0], M[0, 1] / M[0, 0])
    >>> moments_central(image, centroid)
    array([[16.,  0., 20.,  0.],
           [ 0.,  0.,  0.,  0.],
           [20.,  0., 25.,  0.],
           [ 0.,  0.,  0.,  0.]])
    """
    if center is None:
        center = centroid(image)
    calc = image.astype(float)
    for dim, dim_length in enumerate(image.shape):
        delta = cp.arange(dim_length, dtype=float) - center[dim]
        powers_of_delta = delta[:, cp.newaxis] ** cp.arange(order + 1)
        calc = cp.rollaxis(calc, dim, image.ndim)
        calc = cp.dot(calc, powers_of_delta)
        calc = cp.rollaxis(calc, -1, dim)
    return calc
def convolve2d(in1, in2, mode='full'):
    """2-D convolution.

    Note: only inputs laid out as H * W * N * 1 are supported.
    """
    in1 = in1.transpose(2, 3, 0, 1)  # to N * C * H * W
    in2 = in2.transpose(2, 3, 0, 1)
    out_c, _, kh, kw = in2.shape
    n, _, h, w = in1.shape
    if mode == 'full':
        ph, pw = kh - 1, kw - 1
        out_h, out_w = h - kh + 1 + ph * 2, w - kw + 1 + pw * 2  # TODO
    elif mode == 'valid':
        ph, pw = 0, 0
        out_h, out_w = h - kh + 1, w - kw + 1  # TODO
    else:
        raise NotImplementedError
    y = cp.empty((n, out_c, out_h, out_w), dtype=in1.dtype)
    col = im2col_gpu(in1, kh, kw, 1, 1, ph, pw)
    y = cp.tensordot(
        col, in2, ((1, 2, 3), (1, 2, 3))).astype(in1.dtype, copy=False)
    y = cp.rollaxis(y, 3, 1)
    return y.transpose(2, 3, 0, 1)
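def example_convolve2d_shapes():
    # Illustrative sketch, assuming an ``im2col_gpu`` helper with the
    # Chainer-style signature ``im2col_gpu(img, kh, kw, sy, sx, ph, pw)`` is
    # in scope (the call above matches that signature).  It only checks the
    # output shapes for the H * W * N * 1 layout described in the docstring.
    import cupy as cp

    x = cp.random.rand(16, 16, 4, 1).astype(cp.float32)  # H, W, N, C=1
    k = cp.random.rand(3, 3, 2, 1).astype(cp.float32)    # kh, kw, out_c, C=1
    assert convolve2d(x, k, mode='full').shape == (18, 18, 4, 2)
    assert convolve2d(x, k, mode='valid').shape == (14, 14, 4, 2)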
def reshape_x_cupy(
    data: np.ndarray, dtype=cp.float16, hide_map_prob: float = 0.0
) -> np.ndarray:
    """
    Get images from data as a list and preprocess them (using GPU).
    Input:
     - data: ndarray [num_examples x 6]
     - dtype: numpy dtype for the output array
     - hide_map_prob: Probability for removing the minimap (black square)
       from the sequence of images (0 <= hide_map_prob <= 1)
    Output:
     - ndarray [num_examples * 5, num_channels, H, W]
    """
    mean = cp.array([0.485, 0.456, 0.406], dtype=dtype)
    std = cp.array([0.229, 0.224, 0.225], dtype=dtype)
    reshaped = np.zeros((len(data) * 5, 3, 270, 480), dtype=dtype)
    for i in range(0, len(data)):
        black_minimap: bool = (random.random() <= hide_map_prob)
        for j in range(0, 5):
            img = cp.array(data[i][j], dtype=dtype)
            if black_minimap:
                # Put a black square over the minimap
                img[215:, :80] = cp.zeros((55, 80, 3), dtype=dtype)
            # Scale to [0, 1], normalize with ImageNet mean/std and move the
            # channel axis to the front (HWC -> CHW).
            reshaped[i * 5 + j] = cp.asnumpy(
                cp.rollaxis((img / dtype(255.0) - mean) / std, 2, 0)
            )
    return reshaped
def take(a, indices, axis=None, out=None):
    """Takes elements of an array at specified indices along an axis.

    This is an implementation of "fancy indexing" along a single axis.

    This function does not support ``mode`` option.

    Args:
        a (cupy.ndarray): Array to extract elements.
        indices (int or array-like): Indices of elements that this function
            takes.
        axis (int): The axis along which to select indices. The flattened
            input is used by default.
        out (cupy.ndarray): Output array. If provided, it should be of
            appropriate shape and dtype.

    Returns:
        cupy.ndarray: The result of fancy indexing.

    .. seealso:: :func:`numpy.take`

    """
    if axis is None:
        a = a.ravel()
        lshape = ()
        rshape = ()
    else:
        if axis >= a.ndim:
            raise ValueError('Axis overrun')
        lshape = a.shape[:axis]
        rshape = a.shape[axis + 1:]

    if numpy.isscalar(indices):
        a = cupy.rollaxis(a, axis)
        if out is None:
            return a[indices].copy()
        else:
            out[:] = a[indices]
            return out
    elif not isinstance(indices, cupy.ndarray):
        indices = cupy.array(indices, dtype=int)

    out_shape = lshape + indices.shape + rshape
    if out is None:
        out = cupy.empty(out_shape, dtype=a.dtype)
    else:
        if out.dtype != a.dtype:
            raise TypeError('Output dtype mismatch')
        if out.shape != out_shape:
            raise ValueError('Output shape mismatch')

    cdim = indices.size
    rdim = internal.prod(rshape)
    indices = cupy.reshape(
        indices, (1,) * len(lshape) + indices.shape + (1,) * len(rshape))
    return _take_kernel(a, indices, cdim, rdim, out)
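def example_take_usage():
    # Illustrative sketch of the public cupy.take API this mirrors; the
    # result matches numpy.take on the host.
    import cupy as cp
    import numpy as np

    a = cp.arange(12).reshape(3, 4)
    idx = cp.asarray([2, 0])
    gpu = cp.take(a, idx, axis=1)  # shape (3, 2)
    cpu = np.take(cp.asnumpy(a), cp.asnumpy(idx), axis=1)
    assert np.array_equal(cp.asnumpy(gpu), cpu)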
def calc_single_view(ioperand, subscript):
    """Calculates 'ii->i' by cupy.diagonal if needed.

    Args:
        ioperand (cupy.ndarray): Array whose diagonal is taken.
        subscript (str):
            Specifies the subscripts. If the same label appears more than
            once, calculate the diagonal for those axes.
    """
    if '@' in subscript:
        assert subscript.count('@') == 1
        assert ioperand.ndim >= len(subscript) - 1
    else:
        assert ioperand.ndim == len(subscript)

    subscripts_excluded_at = subscript.replace('@', '')
    labels = set(subscripts_excluded_at)
    label_to_axis = collections.defaultdict(list)
    for i, label in enumerate(subscript):
        label_to_axis[label].append(i)

    result = ioperand
    count_dict = collections.Counter(subscript)
    ellipsis_pos = subscript.find('@')

    for label in labels:
        if count_dict[label] == 1:
            continue
        axes_to_diag = []
        for i, char in enumerate(subscripts_excluded_at):
            if char == label:
                if ellipsis_pos == -1 or i < ellipsis_pos:
                    axes_to_diag.append(i)
                else:
                    axes_to_diag.append(i - len(subscripts_excluded_at))
        axes_to_diag = cupy.core.normalize_axis_tuple(axes_to_diag,
                                                      result.ndim)
        for axis in reversed(axes_to_diag[1:]):
            shape_a = result.shape[axis]
            shape_b = result.shape[axes_to_diag[0]]
            if shape_a != shape_b:
                raise ValueError('dimensions in operand 0 for collapsing'
                                 ' index \'{0}\' don\'t match'
                                 ' ({1} != {2})'.format(label, shape_a,
                                                        shape_b))
            result = result.diagonal(0, axis, axes_to_diag[0])
            result = cupy.rollaxis(result, -1, axes_to_diag[0])
            if ellipsis_pos != -1 and axis > ellipsis_pos:
                axis -= result.ndim - len(subscript) + 1
            subscript = subscript[:axis] + subscript[axis + 1:]
            subscripts_excluded_at = subscript.replace('@', '')
    return result, subscript
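def example_single_view_diagonal():
    # Illustrative sketch: the behaviour implemented above is visible through
    # the public einsum API, where a repeated label collapses those axes to
    # their diagonal.
    import cupy as cp

    a = cp.arange(9).reshape(3, 3)
    assert bool(cp.allclose(cp.einsum('ii->i', a), cp.diagonal(a)))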
def inner(a, b):
    """Returns the inner product of two arrays.

    It uses the last axis of each argument to take sum product.

    Args:
        a (cupy.ndarray): The first argument.
        b (cupy.ndarray): The second argument.

    Returns:
        cupy.ndarray: The inner product of ``a`` and ``b``.

    .. seealso:: :func:`numpy.inner`

    """
    a_ndim = a.ndim
    b_ndim = b.ndim
    if a_ndim == 0 or b_ndim == 0:
        return cupy.multiply(a, b)

    a_axis = a_ndim - 1
    b_axis = b_ndim - 1

    if a.shape[-1] != b.shape[-1]:
        raise ValueError('Axis dimension mismatch')

    if a_axis:
        a = cupy.rollaxis(a, a_axis, 0)
    if b_axis:
        b = cupy.rollaxis(b, b_axis, 0)

    ret_shape = a.shape[1:] + b.shape[1:]

    k = a.shape[0]
    n = a.size // k
    m = b.size // k

    return core.tensordot_core(a, b, None, n, m, k, ret_shape)
def inner(a, b):
    """Returns the inner product of two arrays.

    It uses the last axis of each argument to take sum product.

    Args:
        a (cupy.ndarray): The first argument.
        b (cupy.ndarray): The second argument.

    Returns:
        cupy.ndarray: The inner product of ``a`` and ``b``.

    .. seealso:: :func:`numpy.inner`

    """
    a_ndim = a.ndim
    b_ndim = b.ndim
    if a_ndim == 0 or b_ndim == 0:
        return cupy.multiply(a, b)

    a_axis = a_ndim - 1
    b_axis = b_ndim - 1

    if a.shape[-1] != b.shape[-1]:
        raise ValueError('Axis dimension mismatch')

    if a_axis:
        a = cupy.rollaxis(a, a_axis, 0)
    if b_axis:
        b = cupy.rollaxis(b, b_axis, 0)

    ret_shape = a.shape[1:] + b.shape[1:]

    k = a.shape[0]
    n = a.size // k
    m = b.size // k

    return _tensordot_core(a, b, None, n, m, k, ret_shape)
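def example_inner_usage():
    # Illustrative sketch via the public cupy.inner: for 1-D inputs this is
    # the ordinary dot product over the shared last axis.
    import cupy as cp

    v = cp.asarray([1.0, 2.0, 3.0])
    w = cp.asarray([4.0, 5.0, 6.0])
    assert float(cp.inner(v, w)) == float(cp.sum(v * w))  # 32.0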
def reshape_x_cupy(
    data: np.ndarray,
    dtype=cp.float16,
    hide_map_prob: float = 0.0,
    dropout_images_prob: List[float] = None,
) -> np.ndarray:
    """
    Get images from data as a list and preprocess them (using GPU).
    Input:
     - data: ndarray [num_examples x 6]
     - dtype: numpy dtype for the output array
     - hide_map_prob: Probability for removing the minimap (black square)
       from the sequence of images (0 <= hide_map_prob <= 1)
     - dropout_images_prob: Probability, per position in the sequence, of
       replacing an image with a black image (0 <= prob <= 1)
    Output:
     - ndarray [num_examples * 5, num_channels, H, W]
    """
    mean = cp.array([0.485, 0.456, 0.406], dtype=dtype)
    std = cp.array([0.229, 0.224, 0.225], dtype=dtype)
    reshaped = np.zeros(
        (len(data) * 5, 3, data[0][0].shape[0], data[0][0].shape[1]),
        dtype=dtype,
    )
    for i in range(0, len(data)):
        black_minimap: bool = (random.random() <= hide_map_prob)
        for j in range(0, 5):
            black_image: bool = False
            if dropout_images_prob is not None:
                black_image = random.random() <= dropout_images_prob[j]
            if black_image:
                reshaped[i * 5 + j] = np.zeros(
                    (3, data[i][j].shape[0], data[i][j].shape[1]), dtype=dtype
                )
            else:
                img = cp.array(data[i][j], dtype=dtype)
                if black_minimap:
                    # Put a black square over the minimap
                    img[140:, :55] = cp.zeros((40, 55, 3), dtype=dtype)
                # Scale to [0, 1], normalize with ImageNet mean/std and move
                # the channel axis to the front (HWC -> CHW).
                reshaped[i * 5 + j] = cp.asnumpy(
                    cp.rollaxis((img / dtype(255.0) - mean) / std, 2, 0)
                )
    return reshaped
def reshape_x_cupy(data: np.ndarray, dtype=cp.float16) -> np.ndarray:
    """
    Get images from data as a list and preprocess them (using GPU).
    Input:
     - data: ndarray [num_examples x 6]
     - dtype: numpy dtype for the output array
    Output:
     - ndarray [num_examples * 5, num_channels, H, W]
    """
    mean = cp.array([0.485, 0.456, 0.406], dtype=dtype)
    std = cp.array([0.229, 0.224, 0.225], dtype=dtype)
    reshaped = np.zeros((len(data) * 5, 3, 270, 480), dtype=dtype)
    for i in range(0, len(data)):
        for j in range(0, 5):
            # Scale to [0, 1], normalize with ImageNet mean/std and move the
            # channel axis to the front (HWC -> CHW).
            img = cp.array(data[i][j], dtype=dtype) / 255
            reshaped[i * 5 + j] = cp.asnumpy(
                cp.rollaxis((img - mean) / std, 2))
    return reshaped
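def example_reshape_x_cupy_usage():
    # Illustrative sketch with synthetic data; the 270x480x3 image size
    # matches the hard-coded output buffer above (real inputs come from the
    # project's data loader, which is not shown here).
    import numpy as np
    import cupy as cp

    data = np.random.randint(0, 256, size=(2, 6, 270, 480, 3), dtype=np.uint8)
    x = reshape_x_cupy(data, dtype=cp.float16)
    assert x.shape == (10, 3, 270, 480)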
def calc_single_view(ioperand, subscript):
    """Calculates 'ii->i' by cupy.diagonal if needed.

    Args:
        ioperand (cupy.ndarray): Array whose diagonal is taken.
        subscript (str):
            Specifies the subscripts. If the same label appears more than
            once, calculate the diagonal for those axes.
    """
    assert ioperand.ndim == len(subscript)

    labels = set(subscript)
    label_to_axis = collections.defaultdict(list)
    for i, label in enumerate(subscript):
        label_to_axis[label].append(i)

    result = ioperand
    count_dict = collections.Counter(subscript)

    for label in labels:
        if count_dict[label] == 1:
            continue
        axes_to_diag = []
        for i, char in enumerate(subscript):
            if char == label:
                axes_to_diag.append(i)
        for axis in reversed(axes_to_diag[1:]):
            shape_a = result.shape[axis]
            shape_b = result.shape[axes_to_diag[0]]
            if shape_a != shape_b:
                raise ValueError('dimensions in operand 0 for collapsing'
                                 ' index \'{0}\' don\'t match'
                                 ' ({1} != {2})'.format(label, shape_a,
                                                        shape_b))
            result = result.diagonal(0, axis, axes_to_diag[0])
            result = cupy.rollaxis(result, -1, axes_to_diag[0])
            subscript = subscript[:axis] + subscript[axis + 1:]
    return result, subscript
def deltaE_ciede94(lab1, lab2, kH=1, kC=1, kL=1, k1=0.045, k2=0.015):
    """Color difference according to CIEDE 94 standard

    Accommodates perceptual non-uniformities through the use of application
    specific scale factors (`kH`, `kC`, `kL`, `k1`, and `k2`).

    Parameters
    ----------
    lab1 : array_like
        reference color (Lab colorspace)
    lab2 : array_like
        comparison color (Lab colorspace)
    kH : float, optional
        Hue scale
    kC : float, optional
        Chroma scale
    kL : float, optional
        Lightness scale
    k1 : float, optional
        first scale parameter
    k2 : float, optional
        second scale parameter

    Returns
    -------
    dE : array_like
        color difference between `lab1` and `lab2`

    Notes
    -----
    deltaE_ciede94 is not symmetric with respect to lab1 and lab2.  CIEDE94
    defines the scales for the lightness, hue, and chroma in terms of the
    first color.  Consequently, the first color should be regarded as the
    "reference" color.

    `kL`, `k1`, `k2` depend on the application and default to the values
    suggested for graphic arts

    ==========  ==============  ==========
    Parameter    Graphic Arts    Textiles
    ==========  ==============  ==========
    `kL`         1.000           2.000
    `k1`         0.045           0.048
    `k2`         0.015           0.014
    ==========  ==============  ==========

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Color_difference
    .. [2] http://www.brucelindbloom.com/index.html?Eqn_DeltaE_CIE94.html
    """
    L1, C1 = cp.rollaxis(lab2lch(lab1), -1)[:2]
    L2, C2 = cp.rollaxis(lab2lch(lab2), -1)[:2]

    dL = L1 - L2
    dC = C1 - C2
    dH2 = get_dH2(lab1, lab2)

    SL = 1
    SC = 1 + k1 * C1
    SH = 1 + k2 * C1

    dE2 = dL / (kL * SL)
    dE2 *= dE2
    tmp = dC / (kC * SC)
    tmp *= tmp
    dE2 += tmp
    tmp = kH * SH
    tmp *= tmp
    dE2 += dH2 / tmp
    return cp.sqrt(cp.maximum(dE2, 0, out=dE2), out=dE2)
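def example_deltaE_ciede94_asymmetry():
    # Illustrative sketch (assumes the module's lab2lch helper is in scope,
    # as in cucim.skimage.color): CIEDE94 is not symmetric because SC and SH
    # are built from the chroma of the first (reference) color.
    import cupy as cp

    lab1 = cp.asarray([50.0, 40.0, 20.0])
    lab2 = cp.asarray([50.0, 10.0, -5.0])
    d12 = float(deltaE_ciede94(lab1, lab2))
    d21 = float(deltaE_ciede94(lab2, lab1))
    assert d12 != d21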
def test_each_channel():
    filtered = edges_each(COLOR_IMAGE)
    for i, channel in enumerate(cp.rollaxis(filtered, axis=-1)):
        expected = img_as_float(filters.sobel(COLOR_IMAGE[:, :, i]))
        assert_allclose(channel, expected)
def test_each_channel_with_filter_argument():
    filtered = smooth_each(COLOR_IMAGE, SIGMA)
    for i, channel in enumerate(cp.rollaxis(filtered, axis=-1)):
        assert_allclose(channel, smooth(COLOR_IMAGE[:, :, i]))
def deltaE_ciede2000(lab1, lab2, kL=1, kC=1, kH=1):
    """Color difference as given by the CIEDE 2000 standard.

    CIEDE 2000 is a major revision of CIE94.  The perceptual calibration is
    largely based on experience with automotive paint on smooth surfaces.

    Parameters
    ----------
    lab1 : array_like
        reference color (Lab colorspace)
    lab2 : array_like
        comparison color (Lab colorspace)
    kL : float (range), optional
        lightness scale factor, 1 for "acceptably close"; 2 for
        "imperceptible" see deltaE_cmc
    kC : float (range), optional
        chroma scale factor, usually 1
    kH : float (range), optional
        hue scale factor, usually 1

    Returns
    -------
    deltaE : array_like
        The distance between `lab1` and `lab2`

    Notes
    -----
    CIEDE 2000 assumes parametric weighting factors for the lightness, chroma,
    and hue (`kL`, `kC`, `kH` respectively).  These default to 1.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Color_difference
    .. [2] http://www.ece.rochester.edu/~gsharma/ciede2000/ciede2000noteCRNA.pdf
           :DOI:`10.1364/AO.33.008069`
    .. [3] M. Melgosa, J. Quesada, and E. Hita, "Uniformity of some recent
           color metrics tested with an accurate color-difference tolerance
           dataset," Appl. Opt. 33, 8069-8077 (1994).
    """
    warnings.warn(
        "The numerical accuracy of this function on the GPU is reduced "
        "relative to the CPU version"
    )
    unroll = False
    if lab1.ndim == 1 and lab2.ndim == 1:
        unroll = True
        if lab1.ndim == 1:
            lab1 = lab1[None, :]
        if lab2.ndim == 1:
            lab2 = lab2[None, :]
    L1, a1, b1 = cp.rollaxis(lab1, -1)[:3]
    L2, a2, b2 = cp.rollaxis(lab2, -1)[:3]

    # distort `a` based on average chroma
    # then convert to lch coordinates from distorted `a`
    # all subsequent calculations are in the new coordinates
    # (often denoted "prime" in the literature)
    Cbar = 0.5 * (cp.hypot(a1, b1) + cp.hypot(a2, b2))
    c7 = Cbar ** 7
    G = 0.5 * (1 - cp.sqrt(c7 / (c7 + 25 ** 7)))
    scale = 1 + G
    C1, h1 = _cart2polar_2pi(a1 * scale, b1)
    C2, h2 = _cart2polar_2pi(a2 * scale, b2)
    # recall that c, h are polar coordinates.  c==r, h==theta

    # CIEDE 2000 has four terms to delta_e:
    # 1) Luminance term
    # 2) Hue term
    # 3) Chroma term
    # 4) hue Rotation term

    # lightness term
    Lbar = 0.5 * (L1 + L2)
    tmp = Lbar - 50
    tmp *= tmp
    SL = 1 + 0.015 * tmp / cp.sqrt(20 + tmp)
    L_term = (L2 - L1) / (kL * SL)

    # chroma term
    Cbar = 0.5 * (C1 + C2)  # new coordinates
    SC = 1 + 0.045 * Cbar
    C_term = (C2 - C1) / (kC * SC)

    # hue term
    h_diff = h2 - h1
    h_sum = h1 + h2
    CC = C1 * C2

    dH = h_diff.copy()
    dH[h_diff > np.pi] -= 2 * np.pi
    dH[h_diff < -np.pi] += 2 * np.pi
    dH[CC == 0.] = 0.  # if r == 0, dtheta == 0
    dH_term = 2 * cp.sqrt(CC) * cp.sin(dH / 2)

    Hbar = h_sum.copy()
    mask = cp.logical_and(CC != 0., cp.abs(h_diff) > np.pi)
    Hbar[mask * (h_sum < 2 * np.pi)] += 2 * np.pi
    Hbar[mask * (h_sum >= 2 * np.pi)] -= 2 * np.pi
    Hbar[CC == 0.] *= 2
    Hbar *= 0.5

    T = (1 -
         0.17 * cp.cos(Hbar - np.deg2rad(30)) +
         0.24 * cp.cos(2 * Hbar) +
         0.32 * cp.cos(3 * Hbar + np.deg2rad(6)) -
         0.20 * cp.cos(4 * Hbar - np.deg2rad(63))
         )
    SH = 1 + 0.015 * Cbar * T

    H_term = dH_term / (kH * SH)

    # hue rotation
    c7 = Cbar ** 7
    Rc = 2 * cp.sqrt(c7 / (c7 + 25 ** 7))
    tmp = (cp.rad2deg(Hbar) - 275) / 25
    tmp *= tmp
    dtheta = np.deg2rad(30) * cp.exp(-tmp)
    R_term = -cp.sin(2 * dtheta) * Rc * C_term * H_term

    # put it all together
    dE2 = L_term * L_term
    dE2 += C_term * C_term
    dE2 += H_term * H_term
    dE2 += R_term
    cp.sqrt(cp.maximum(dE2, 0, out=dE2), out=dE2)
    if unroll:
        dE2 = dE2[0]
    return dE2
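def example_deltaE_ciede2000_usage():
    # Illustrative sketch with 1-D inputs (the scalar "unroll" path); assumes
    # the module's _cart2polar_2pi helper is in scope, as in
    # cucim.skimage.color.  The colors are the first pair from the Sharma et
    # al. CIEDE2000 test data, whose published difference is about 2.0425.
    import cupy as cp

    lab1 = cp.asarray([50.0, 2.6772, -79.7751])
    lab2 = cp.asarray([50.0, 0.0, -82.7485])
    dE = deltaE_ciede2000(lab1, lab2)
    assert abs(float(dE) - 2.0425) < 1e-3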
def deltaE_cmc(lab1, lab2, kL=1, kC=1):
    """Color difference from the CMC l:c standard.

    This color difference was developed by the Colour Measurement Committee
    (CMC) of the Society of Dyers and Colourists (United Kingdom).  It is
    intended for use in the textile industry.

    The scale factors `kL`, `kC` set the weight given to differences in
    lightness and chroma relative to differences in hue.  The usual values are
    ``kL=2``, ``kC=1`` for "acceptability" and ``kL=1``, ``kC=1`` for
    "imperceptibility".  Colors with ``dE > 1`` are "different" for the given
    scale factors.

    Parameters
    ----------
    lab1 : array_like
        reference color (Lab colorspace)
    lab2 : array_like
        comparison color (Lab colorspace)

    Returns
    -------
    dE : array_like
        distance between colors `lab1` and `lab2`

    Notes
    -----
    deltaE_cmc defines the scales for the lightness, hue, and chroma in terms
    of the first color.  Consequently
    ``deltaE_cmc(lab1, lab2) != deltaE_cmc(lab2, lab1)``

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Color_difference
    .. [2] http://www.brucelindbloom.com/index.html?Eqn_DeltaE_CIE94.html
    .. [3] F. J. J. Clarke, R. McDonald, and B. Rigg, "Modification to the
           JPC79 colour-difference formula," J. Soc. Dyers Colour. 100,
           128-132 (1984).
    """
    L1, C1, h1 = cp.rollaxis(lab2lch(lab1), -1)[:3]
    L2, C2, h2 = cp.rollaxis(lab2lch(lab2), -1)[:3]

    dC = C1 - C2
    dL = L1 - L2
    dH2 = get_dH2(lab1, lab2)

    T = cp.where(cp.logical_and(cp.rad2deg(h1) >= 164, cp.rad2deg(h1) <= 345),
                 0.56 + 0.2 * cp.abs(cp.cos(h1 + cp.deg2rad(168))),
                 0.36 + 0.4 * cp.abs(cp.cos(h1 + cp.deg2rad(35)))
                 )
    c1_4 = C1 ** 4
    F = cp.sqrt(c1_4 / (c1_4 + 1900))

    SL = cp.where(L1 < 16, 0.511, 0.040975 * L1 / (1. + 0.01765 * L1))
    SC = 0.638 + 0.0638 * C1 / (1. + 0.0131 * C1)
    SH = SC * (F * T + 1 - F)

    dE2 = (dL / (kL * SL)) ** 2
    dE2 += (dC / (kC * SC)) ** 2
    dE2 += dH2 / (SH ** 2)
    return cp.sqrt(cp.maximum(dE2, 0, out=dE2), out=dE2)
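def example_deltaE_cmc_scale_factors():
    # Illustrative sketch (assumes lab2lch and get_dH2 are in scope, as in
    # cucim.skimage.color): with a pure lightness difference, the
    # "acceptability" setting kL=2 halves the reported dE relative to the
    # "imperceptibility" setting kL=1.
    import cupy as cp

    lab1 = cp.asarray([60.0, 10.0, 10.0])
    lab2 = cp.asarray([55.0, 10.0, 10.0])
    dE_accept = float(deltaE_cmc(lab1, lab2, kL=2, kC=1))
    dE_percept = float(deltaE_cmc(lab1, lab2, kL=1, kC=1))
    assert abs(dE_percept - 2 * dE_accept) < 1e-5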
def percentile(a, q, axis=None, out=None, interpolation='linear',
               keepdims=False):
    """Computes the q-th percentile of the data along the specified axis.

    Args:
        a (cupy.ndarray): Array for which to compute percentiles.
        q (float, tuple of floats or cupy.ndarray): Percentiles to compute
            in the range between 0 and 100 inclusive.
        axis (int or tuple of ints): Along which axis or axes to compute
            the percentiles. The flattened array is used by default.
        out (cupy.ndarray): Output array.
        interpolation (str): Interpolation method when a quantile lies
            between two data points. ``linear`` interpolation is used by
            default. Supported interpolations are ``lower``, ``higher``,
            ``midpoint``, ``nearest`` and ``linear``.
        keepdims (bool): If ``True``, the axis is retained as an axis of
            size one.

    Returns:
        cupy.ndarray: The percentiles of ``a``, along the axis if specified.

    .. seealso:: :func:`numpy.percentile`

    """
    q = cupy.asarray(q, dtype=a.dtype)
    if q.ndim == 0:
        q = q[None]
        zerod = True
    else:
        zerod = False
    if q.ndim > 1:
        raise ValueError('Expected q to have a dimension of 1.\n'
                         'Actual: {0} != 1'.format(q.ndim))

    if keepdims:
        if axis is None:
            keepdim = (1,) * a.ndim
        else:
            keepdim = list(a.shape)
            for ax in axis:
                keepdim[ax % a.ndim] = 1
            keepdim = tuple(keepdim)

    # Copy a since we need it sorted but without modifying the original array
    if isinstance(axis, int):
        axis = axis,
    if axis is None:
        ap = a.flatten()
        nkeep = 0
    else:
        # Reduce axes from a and put them last
        axis = tuple(ax % a.ndim for ax in axis)
        keep = set(range(a.ndim)) - set(axis)
        nkeep = len(keep)
        for i, s in enumerate(sorted(keep)):
            a = a.swapaxes(i, s)
        ap = a.reshape(a.shape[:nkeep] + (-1,)).copy()

    axis = -1
    ap.sort(axis=axis)
    Nx = ap.shape[axis]
    indices = q * 0.01 * (Nx - 1.)  # percents to decimals

    if interpolation == 'lower':
        indices = cupy.floor(indices).astype(cupy.int32)
    elif interpolation == 'higher':
        indices = cupy.ceil(indices).astype(cupy.int32)
    elif interpolation == 'midpoint':
        indices = 0.5 * (cupy.floor(indices) + cupy.ceil(indices))
    elif interpolation == 'nearest':
        # TODO(hvy): Implement nearest using around
        raise ValueError("'nearest' interpolation is not yet supported. "
                         'Please use any other interpolation method.')
    elif interpolation == 'linear':
        pass
    else:
        raise ValueError('Unexpected interpolation method.\n'
                         "Actual: '{0}' not in ('linear', 'lower', 'higher', "
                         "'midpoint')".format(interpolation))

    if indices.dtype == cupy.int32:
        ret = cupy.rollaxis(ap, axis)
        ret = ret.take(indices, axis=0, out=out)
    else:
        if out is None:
            ret = cupy.empty(ap.shape[:-1] + q.shape, dtype=cupy.float64)
        else:
            ret = cupy.rollaxis(out, 0, out.ndim)

        cupy.ElementwiseKernel(
            'S idx, raw T a, raw int32 offset', 'U ret',
            '''
            ptrdiff_t idx_below = floor(idx);
            U weight_above = idx - idx_below;

            ptrdiff_t offset_i = _ind.get()[0] * offset;
            ret = a[offset_i + idx_below] * (1.0 - weight_above)
              + a[offset_i + idx_below + 1] * weight_above;
            ''',
            'percentile_weightnening'
        )(indices, ap, ap.shape[-1] if ap.ndim > 1 else 0, ret)
        ret = cupy.rollaxis(ret, -1)  # Roll q dimension back to first axis

    if zerod:
        ret = ret.squeeze(0)
    if keepdims:
        if q.size > 1:
            keepdim = (-1,) + keepdim
        ret = ret.reshape(keepdim)

    return cupy.ascontiguousarray(ret)
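def example_percentile_usage():
    # Illustrative sketch of the public cupy.percentile API: results match
    # numpy.percentile for the default linear interpolation.
    import cupy as cp
    import numpy as np

    x = cp.random.rand(1000)
    gpu = cp.percentile(x, [25, 50, 75])
    cpu = np.percentile(cp.asnumpy(x), [25, 50, 75])
    assert np.allclose(cp.asnumpy(gpu), cpu)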
def test_rollaxis_failure(self):
    a = testing.shaped_arange((2, 3, 4))
    with self.assertRaises(ValueError):
        cupy.rollaxis(a, 3)
def _quantile_unchecked(a, q, axis=None, out=None, interpolation='linear',
                        keepdims=False):
    if q.ndim == 0:
        q = q[None]
        zerod = True
    else:
        zerod = False
    if q.ndim > 1:
        raise ValueError('Expected q to have a dimension of 1.\n'
                         'Actual: {0} != 1'.format(q.ndim))

    if keepdims:
        if axis is None:
            keepdim = (1,) * a.ndim
        else:
            keepdim = list(a.shape)
            for ax in axis:
                keepdim[ax % a.ndim] = 1
            keepdim = tuple(keepdim)

    # Copy a since we need it sorted but without modifying the original array
    if isinstance(axis, int):
        axis = axis,
    if axis is None:
        ap = a.flatten()
        nkeep = 0
    else:
        # Reduce axes from a and put them last
        axis = tuple(ax % a.ndim for ax in axis)
        keep = set(range(a.ndim)) - set(axis)
        nkeep = len(keep)
        for i, s in enumerate(sorted(keep)):
            a = a.swapaxes(i, s)
        ap = a.reshape(a.shape[:nkeep] + (-1,)).copy()

    axis = -1
    ap.sort(axis=axis)
    Nx = ap.shape[axis]
    indices = q * (Nx - 1.)

    if interpolation == 'lower':
        indices = cupy.floor(indices).astype(cupy.int32)
    elif interpolation == 'higher':
        indices = cupy.ceil(indices).astype(cupy.int32)
    elif interpolation == 'midpoint':
        indices = 0.5 * (cupy.floor(indices) + cupy.ceil(indices))
    elif interpolation == 'nearest':
        # TODO(hvy): Implement nearest using around
        raise ValueError('\'nearest\' interpolation is not yet supported. '
                         'Please use any other interpolation method.')
    elif interpolation == 'linear':
        pass
    else:
        raise ValueError('Unexpected interpolation method.\n'
                         'Actual: \'{0}\' not in (\'linear\', \'lower\', '
                         '\'higher\', \'midpoint\')'.format(interpolation))

    if indices.dtype == cupy.int32:
        ret = cupy.rollaxis(ap, axis)
        ret = ret.take(indices, axis=0, out=out)
    else:
        if out is None:
            ret = cupy.empty(ap.shape[:-1] + q.shape, dtype=cupy.float64)
        else:
            ret = cupy.rollaxis(out, 0, out.ndim)

        cupy.ElementwiseKernel(
            'S idx, raw T a, raw int32 offset, raw int32 size', 'U ret',
            '''
            ptrdiff_t idx_below = floor(idx);
            U weight_above = idx - idx_below;

            ptrdiff_t max_idx = size - 1;
            ptrdiff_t offset_bottom = _ind.get()[0] * offset + idx_below;
            ptrdiff_t offset_top = min(offset_bottom + 1, max_idx);

            U diff = a[offset_top] - a[offset_bottom];

            if (weight_above < 0.5) {
                ret = a[offset_bottom] + diff * weight_above;
            } else {
                ret = a[offset_top] - diff * (1 - weight_above);
            }
            ''',
            'percentile_weightnening'
        )(indices, ap, ap.shape[-1] if ap.ndim > 1 else 0, ap.size, ret)
        ret = cupy.rollaxis(ret, -1)  # Roll q dimension back to first axis

    if zerod:
        ret = ret.squeeze(0)
    if keepdims:
        if q.size > 1:
            keepdim = (-1,) + keepdim
        ret = ret.reshape(keepdim)

    return _core._internal_ascontiguousarray(ret)