def int_value_noise_3d(x, y, z, seed):
    x = x.astype(int)
    y = y.astype(int)
    z = z.astype(int)
    c = (__BN_X_NOISE_GEN * x) + (__BN_Y_NOISE_GEN * y) + (
        __BN_Z_NOISE_GEN * z) + __BN_SEED_NOISE_GEN * seed
    c = cp.bitwise_and(c, 0x7fffffff)
    c = cp.bitwise_xor(cp.right_shift(c, 13), c)
    c = cp.bitwise_and((c * (c * c * 60493 + 19990303) + 1376312589),
                       0x7fffffff)
    return c
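# Usage sketch for int_value_noise_3d (not from the original source). The
# __BN_* generator constants are defined elsewhere in the real module; the
# values below are placeholders set only so this example runs on its own.
import cupy as cp

__BN_X_NOISE_GEN = 1619
__BN_Y_NOISE_GEN = 31337
__BN_Z_NOISE_GEN = 6971
__BN_SEED_NOISE_GEN = 1013

xs = cp.arange(8)
print(int_value_noise_3d(xs, xs, xs, seed=42))  # pseudo-random ints in [0, 0x7fffffff]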
def inject_error(weight, mask0, mask1, num_bits=32):
    if num_bits == 32:
        dtype = cp.uint32
        ftype = cp.float32
    shape = weight.shape
    weight_flatten = cp.ravel(weight).view(dtype)
    mask0, mask0_bit = mask0
    mask1, mask1_bit = mask1
    zero = cp.zeros(1, dtype=dtype)
    if len(mask0) != 0 or len(mask1) != 0:
        for b in range(num_bits):
            fault = cp.full(weight_flatten.size, 2**b, dtype=dtype)
            bit_loc0 = cp.where(mask0_bit == b, mask0, zero).nonzero()[0]
            bit_loc1 = cp.where(mask1_bit == b, mask1, zero).nonzero()[0]
            uniform0 = cp.zeros(weight_flatten.size, dtype=dtype)
            uniform1 = cp.zeros(weight_flatten.size, dtype=dtype)
            # Inject bit error
            if len(bit_loc0) > 0:
                cp.put(uniform0, mask0[bit_loc0], fault)
            if len(bit_loc1) > 0:
                cp.put(uniform1, mask1[bit_loc1], fault)
            # Stuck at 0
            not_mask0 = cp.invert(uniform0)
            weight_flatten = cp.bitwise_and(weight_flatten, not_mask0)
            # Stuck at 1
            weight_flatten = cp.bitwise_or(weight_flatten, uniform1)
        weight_float = weight_flatten.view(ftype)
        return cp.reshape(weight_float, shape)
    else:
        return weight
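# Usage sketch for inject_error (inputs invented for illustration). mask0 and
# mask1 are assumed to be (flat_index, bit_position) pairs selecting which bits
# of the flattened float32 weights are stuck at 0 and stuck at 1, respectively.
import cupy as cp

weights = cp.random.randn(4, 4).astype(cp.float32)
stuck_at_0 = (cp.array([3], dtype=cp.uint32), cp.array([30], dtype=cp.uint32))
stuck_at_1 = (cp.array([5], dtype=cp.uint32), cp.array([22], dtype=cp.uint32))
faulty = inject_error(weights, stuck_at_0, stuck_at_1)
print(cp.abs(faulty - weights).max())  # only flat elements 3 and 5 change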
def decompress(self, tensor_compressed, shape):
    tensor_compressed, = tensor_compressed
    cupy_tensor = cupy.fromDlpack(to_dlpack(tensor_compressed))
    # High bit encodes the sign, low 7 bits encode a biased exponent.
    sign = cupy_tensor > 127
    exps = cupy.bitwise_and(cupy_tensor, 0b01111111)
    # Rebuild a float32 whose exponent field is (exps + 18); a stored exponent
    # of 0 marks a zero value and is masked out below.
    floats = cupy.left_shift((exps + 18).astype(cupy.int32), 23).view(cupy.float32)
    tensor_decompressed = cupy.where(sign, -floats, floats)
    tensor_decompressed = cupy.multiply((exps >= 1).astype(cupy.float32),
                                        tensor_decompressed)
    return from_dlpack(tensor_decompressed.toDlpack()).view(shape)
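# Hypothetical round trip for decompress. The compress side and the owning
# class are not shown, so self is passed as None and the 8-bit codes below are
# hand-built (high bit = sign, low 7 bits = biased exponent, 0 = zero value).
# Requires a CUDA device plus the torch/cupy dlpack helpers the original
# module imports.
import torch

codes = torch.tensor([0, 100, 228], dtype=torch.uint8, device='cuda')
restored = decompress(None, (codes,), (3,))
print(restored)  # roughly [0.0, 2**-9, -(2**-9)]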
def gradient_noise_3d(fx, fy, fz, ix, iy, iz, seed):
    vi = (__BN_X_NOISE_GEN * ix) + (__BN_Y_NOISE_GEN * iy) + (
        __BN_Z_NOISE_GEN * iz) + __BN_SEED_NOISE_GEN * seed
    vi = cp.bitwise_and(vi, 0xffffffff)
    vi = cp.bitwise_xor(vi, cp.right_shift(vi, __BN_SHIFT_NOISE_GEN))
    vi = cp.bitwise_and(vi, 0xff)
    vi_l2 = cp.left_shift(vi, 2)
    xvGrad = g_randomVectors[vi_l2]
    yvGrad = g_randomVectors[vi_l2 + 1]
    zvGrad = g_randomVectors[vi_l2 + 2]
    xvPoint = fx - ix
    yvPoint = fy - iy
    zvPoint = fz - iz
    return ((xvGrad * xvPoint) + (yvGrad * yvPoint) +
            (zvGrad * zvPoint)) * 2.12
def jaccard(im1, im2):
    im1 = np.asarray(im1).astype(bool)
    im2 = np.asarray(im2).astype(bool)
    if im1.shape != im2.shape:
        raise ValueError("Shape mismatch: im1 and im2 must have the same shape.")
    # cupy
    im1_cp = cp.asarray(im1)
    im2_cp = cp.asarray(im2)
    JI = np.double(cp.sum(cp.bitwise_and(im1_cp, im2_cp))) / np.double(
        cp.sum(cp.bitwise_or(im1_cp, im2_cp)))
    return JI
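# Quick check of jaccard on two small boolean masks (assumes the numpy/cupy
# imports used by the function above).
import numpy as np

a = np.array([[1, 1, 0], [0, 1, 0]], dtype=bool)
b = np.array([[1, 0, 0], [0, 1, 1]], dtype=bool)
print(jaccard(a, b))  # intersection = 2, union = 4 -> 0.5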
def bitwise_and(x1: Array, x2: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.bitwise_and <numpy.bitwise_and>`.

    See its docstring for more information.
    """
    if (x1.dtype not in _integer_or_boolean_dtypes
            or x2.dtype not in _integer_or_boolean_dtypes):
        raise TypeError(
            "Only integer or boolean dtypes are allowed in bitwise_and")
    # Call result type here just to raise on disallowed type combinations
    _result_type(x1.dtype, x2.dtype)
    x1, x2 = Array._normalize_two_args(x1, x2)
    return Array._new(np.bitwise_and(x1._array, x2._array))
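# Possible call through the Array API namespace this wrapper belongs to.
# numpy.array_api is the provisional namespace shipped with NumPy 1.22+;
# newer NumPy versions provide the same interface as the separate
# array_api_strict package, so the import path may differ.
import numpy.array_api as xp

a = xp.asarray([0b1100, 0b1010])
b = xp.asarray([0b1010, 0b0110])
print(xp.bitwise_and(a, b))  # [8, 2]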
def _batch_fingerprint(self, h, cuda='auto'):
    '''
    Takes a sequence of hash values and creates a minHash fingerprint of
    length n_perm or 2*n_perm if mirror=True.
    '''
    h = np.array(h, dtype=np.uint32)[:, np.newaxis]
    a, b = self.permutations
    if cuda == 'auto':
        cuda = _CUDA and (len(h) >= _MIN_CUDA_SIZE)
    if cuda:
        # Pass data to the GPU
        h = cp.asarray(h)
        a = cp.asarray(a)
        b = cp.asarray(b)
        p = cp.asarray(np.uint64(self._mersenne_prime))
        m = cp.asarray(np.uint64(self._max_hash))
        # Run same universal hashing algorithm as cpu version
        H = cp.tile(h, self.n_perm)
        H = cp.bitwise_and((a * H + b) % p, m)
        f = cp.asnumpy(H.min(axis=0))
        if self.mirror:
            f_mirrored = cp.asnumpy(H.max(axis=0))
            f_mirrored = self._max_hash - f_mirrored
            f = np.hstack([f, f_mirrored])
        # Clear gpu cache
        cp.get_default_memory_pool().free_all_blocks()
    else:
        H = np.tile(h, self.n_perm)
        H = np.bitwise_and((a * H + b) % self._mersenne_prime,
                           np.uint64(self._max_hash))
        f = H.min(axis=0)
        if self.mirror:
            f_mirrored = H.max(axis=0)
            f_mirrored = self._max_hash - f_mirrored
            f = np.hstack([f, f_mirrored])
    return f.astype(np.uint32)
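# Hypothetical call sketch: the class that owns _batch_fingerprint is not
# shown, so a bare namespace stands in for `self` with the attributes the
# method reads (n_perm, permutations, _mersenne_prime, _max_hash, mirror).
# All values are invented; cuda=False keeps the example on the CPU path, and
# the method is called here as a plain function for illustration.
import types
import numpy as np

rng = np.random.RandomState(0)
stub = types.SimpleNamespace(
    n_perm=64,
    _mersenne_prime=np.uint64((1 << 61) - 1),
    _max_hash=np.uint64((1 << 32) - 1),
    mirror=False,
)
stub.permutations = (
    rng.randint(1, stub._mersenne_prime, size=stub.n_perm, dtype=np.uint64),
    rng.randint(0, stub._mersenne_prime, size=stub.n_perm, dtype=np.uint64),
)

token_hashes = rng.randint(0, 1 << 32, size=100, dtype=np.uint32)
fingerprint = _batch_fingerprint(stub, token_hashes, cuda=False)
print(fingerprint.shape, fingerprint.dtype)  # (64,) uint32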
def __rand__(self, other):
    return cupy.bitwise_and(other, self)
def __and__(self, other):
    return cupy.bitwise_and(self, other)
def P_generator_SL(MatingPool, Pop_Gradient, Boundary, Coding, MaxOffspring):
    N, D = MatingPool.shape
    if MaxOffspring < 1 or MaxOffspring > N:
        MaxOffspring = N
    if Coding == "Real":
        ProC = 1
        ProM = 1 / D
        DisC = 20
        DisM = 20
        Out = Pop_Gradient
        Offspring = np.zeros((N, D))
        for i in range(0, N, 2):
            flag = np.random.rand(1) > 0.5  # when > 1
            miu1 = np.random.rand(D, ) / 2
            miu2 = np.random.rand(D, ) / 2 + 0.5
            miu_temp = np.random.random((D, ))
            dictor = MatingPool[i, :] > MatingPool[i + 1, :]
            MatingPool[i][dictor], MatingPool[i + 1][dictor] = MatingPool[
                i + 1][dictor], MatingPool[i][dictor]
            Out[i][dictor], Out[i + 1][dictor] = Out[i + 1][dictor], Out[i][dictor]
            G_temp = Out[i:i + 2, :].copy()
            ##
            L = G_temp[0, :].copy()
            P = miu1.copy()
            P[L > 0] = miu2[L > 0].copy()
            P[L == 0] = miu_temp[L == 0].copy()
            miu = P.copy()
            beta = np.zeros((D, ))
            beta[miu <= 0.5] = (2 * miu[miu <= 0.5])**(1 / (DisC + 1))
            beta[miu > 0.5] = (2 - 2 * miu[miu > 0.5])**(-1 / (DisC + 1))
            beta[np.random.random((D, )) > ProC] = 1
            if flag:
                beta[MatingPool[i] == 0] = 1
            Offspring[i, :] = ((MatingPool[i, :] + MatingPool[i + 1, :]) / 2) + (
                np.multiply(beta, (MatingPool[i, :] - MatingPool[i + 1, :]) / 2))
            ##
            L = -G_temp[0, :].copy()
            P = miu1.copy()
            P[L > 0] = miu2[L > 0].copy()
            P[L == 0] = miu_temp[L == 0].copy()
            miu = P.copy()
            beta = np.zeros((D, ))
            beta[miu <= 0.5] = (2 * miu[miu <= 0.5])**(1 / (DisC + 1))
            beta[miu > 0.5] = (2 - 2 * miu[miu > 0.5])**(-1 / (DisC + 1))
            beta[np.random.random((D, )) > ProC] = 1
            if flag:
                beta[MatingPool[i + 1] == 0] = 1
            Offspring[i + 1, :] = ((MatingPool[i, :] + MatingPool[i + 1, :]) / 2) - (
                np.multiply(beta, (MatingPool[i, :] - MatingPool[i + 1, :]) / 2))
            Out[i][dictor], Out[i + 1][dictor] = Out[i + 1][dictor], Out[i][dictor]
            #
            k1 = np.random.rand(D, ) > 0.5
            L = G_temp[0, :].copy()
            k2 = Offspring[i, :] != 0
            kl1 = np.bitwise_and(k1, L < 0)
            L = -G_temp[1, :].copy()
            k2 = Offspring[i + 1, :] != 0
            # kl2 = np.bitwise_and(np.bitwise_and(k1, L < 0), k2)
            kl2 = np.bitwise_and(k1, L < 0)
            Offspring[i][kl1], Offspring[i + 1][kl2] = Offspring[
                i + 1][kl1], Offspring[i][kl2]
            Out[i][kl1], Out[i + 1][kl2] = Out[i + 1][kl1], Out[i][kl2]
            Offspring[i][dictor], Offspring[i + 1][dictor] = Offspring[
                i + 1][dictor], Offspring[i][dictor]
        Offspring_temp = Offspring[:MaxOffspring, :].copy()
        Offspring = Offspring_temp
        if MaxOffspring == 1:
            MaxValue = Boundary[0, :]
            MinValue = Boundary[1, :]
        else:
            MaxValue = np.tile(Boundary[0, :], (MaxOffspring, 1))
            MinValue = np.tile(Boundary[1, :], (MaxOffspring, 1))
        #
        k = np.random.random((MaxOffspring, D))
        miu = np.random.random((MaxOffspring, D))
        Temp = np.bitwise_and(k <= ProM, miu < 0.5)
        # Offspring[Temp] = Offspring[Temp] + np.multiply(
        #     (MaxValue[Temp] - MinValue[Temp]),
        #     ((2 * miu[Temp] + np.multiply(
        #         1 - 2 * miu[Temp],
        #         (1 - (Offspring[Temp] - MinValue[Temp]) /
        #          (MaxValue[Temp] - MinValue[Temp])) ** (DisM + 1)))
        #      ** (1 / (DisM + 1)) - 1))
        Offspring[Temp] = 0
        Temp = np.bitwise_and(k <= ProM, miu >= 0.5)
        # Offspring[Temp] = Offspring[Temp] + np.multiply(
        #     (MaxValue[Temp] - MinValue[Temp]),
        #     (1 - ((2 * (1 - miu[Temp])) + np.multiply(
        #         2 * (miu[Temp] - 0.5),
        #         (1 - (MaxValue[Temp] - Offspring[Temp]) /
        #          (MaxValue[Temp] - MinValue[Temp])) ** (DisM + 1)))
        #      ** (1 / (DisM + 1))))
        Offspring[Temp] = 0
        Offspring[Offspring > MaxValue] = MaxValue[Offspring > MaxValue]
        Offspring[Offspring < MinValue] = MinValue[Offspring < MinValue]
    elif Coding == "Binary":
        Offspring = []
    elif Coding == "DE":
        Offspring = []
    return Offspring
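# Illustrative call of P_generator_SL with invented inputs: a small real-coded
# population, a matching gradient matrix, and per-gene bounds. As the function
# indexes it, Boundary row 0 holds upper bounds and row 1 holds lower bounds.
import numpy as np

np.random.seed(0)
N, D = 4, 5
MatingPool = np.random.rand(N, D)
Pop_Gradient = np.random.randn(N, D)
Boundary = np.vstack([np.ones(D), np.zeros(D)])
children = P_generator_SL(MatingPool, Pop_Gradient, Boundary, "Real", N)
print(children.shape)  # (4, 5)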