def test_binary_ufuncs_scalar(self):
    """
    Binary math ufuncs applied to (array, scalar) pairs must match numpy exactly.

    When FastArray produces a different result dtype than numpy, the CRC
    comparison is skipped (it would fail on the raw bytes).
    NOTE: for floor_divide on floats, numpy returns -0.0 (ex. 0.0/1) where
    FastArray returns 0.0.
    """
    sample_values = list(range(1, 10))  # nonzero so division is safe
    dtypes = int_types + float_types
    binary_ops = [
        np.add,
        np.subtract,
        np.multiply,
        np.divide,
        np.true_divide,
        np.floor_divide,
    ]
    for dtype in dtypes:
        rt_arr = FastArray(sample_values, dtype=dtype)
        base_arr = np.array(sample_values, dtype=dtype)
        for op in binary_ops:
            for scalar in (3, 2.1):
                rt_out = op(rt_arr, scalar)
                np_out = op(base_arr, scalar)
                # Compare via CRC only when both paths agree on the dtype.
                if rt_out.dtype == np_out.dtype:
                    rt_crc = rc.CalculateCRC(rt_out)
                    np_crc = rc.CalculateCRC(np_out)
                    self.assertEqual(
                        rt_crc,
                        np_crc,
                        msg=f"Test failed on function {op} with dtype {dtype} and scalar {scalar}.",
                    )
def test_bitwise(self):
    """
    Bitwise ufuncs (and/xor/or) on integer arrays must produce byte-identical
    results (CRC match) between FastArray and numpy.

    These functions only apply to integer or boolean arrays.
    """
    bit_ops = [np.bitwise_and, np.bitwise_xor, np.bitwise_or]
    for dtype in int_types:
        rt_arr = FastArray(num_list, dtype=dtype)
        base_arr = rt_arr._np
        for op in bit_ops:
            # Apply each op against the reversed array to get varied operands.
            rt_out = op(rt_arr, rt_arr[::-1])
            np_out = op(base_arr, base_arr[::-1])
            # CRC comparison is only meaningful when the dtypes agree.
            if rt_out.dtype == np_out.dtype:
                rt_crc = rc.CalculateCRC(rt_out)
                np_crc = rc.CalculateCRC(np_out)
                self.assertEqual(
                    rt_crc,
                    np_crc,
                    msg=f"Test failed on function {op} with dtype {dtype}",
                )
def test_unary_ufuncs_scalar(self):
    """
    Unary math ufuncs must match numpy on random positive floats cast to each
    numeric dtype.

    ***incomplete test - need to handle the rest of the unary funcs for as
    many types as possible. np.negative and np.positive both raise errors
    when applied to boolean arrays.
    """
    samples = (np.random.rand(10) * 100) + 1  # values in (1, 101)
    dtypes = int_types + float_types
    # TODO: add separate test for boolean arrays to prevent natural crash
    # dtypes.remove(np.bool_)
    unary_ops = [
        np.absolute,
        np.abs,
        np.fabs,
        np.floor,
        np.ceil,
        np.trunc,
        np.round,
        np.rint,
    ]
    for dtype in dtypes:
        rt_arr = FastArray(samples, dtype=dtype)
        base_arr = np.array(samples, dtype=dtype)
        for op in unary_ops:
            rt_out = op(rt_arr)
            np_out = op(base_arr)
            if rt_out.dtype == np_out.dtype:
                # Same dtype: compare exact bytes via CRC.
                rt_out = rc.CalculateCRC(rt_out)
                np_out = rc.CalculateCRC(np_out)
                self.assertEqual(
                    rt_out,
                    np_out,
                    msg=f"Test failed on function {op} with dtype {dtype}",
                )
            else:
                # Different dtypes: fall back to an approximate comparison.
                self.assertAlmostEqual(
                    rt_out,
                    np_out,
                    msg=f"Test failed on function {op} with dtype {dtype}",
                )
def test_basic(self) -> None:
    """
    Sanity-check rc.CalculateCRC on a large random byte array.

    Computes the CRC of 500 overlapping suffixes of the array and checks the
    hash values look uniformly distributed (mean and variance close to a
    discrete uniform distribution), i.e. the CRC exhibits hash-like
    cascading behavior.
    """
    # Array size (in bytes, since that's what CRC is going to look at).
    # This is a reasonably large, PRIME value so we make sure all code paths
    # are exercised (e.g. don't want to fall into some special case that
    # only handles arrays smaller than 1000 bytes or something).
    array_size_bytes = 82727

    # Create a "random" array (but seed the RNG so this test is deterministic).
    # NOTE: renamed from `bytes` to avoid shadowing the builtin.
    data = np.frombuffer(default_rng(123456).bytes(array_size_bytes), dtype=np.int8)

    # Calculate the CRC32C of several slices of the array.
    # We'll use these values to check if we're seeing hash-like cascading behavior;
    # this also allows us to avoid test breakage if the RNG algorithm changes
    # or otherwise has differences between numpy versions or systems.
    results = np.empty(
        shape=500, dtype=np.uint64
    )  # NOTE: dtype needs to be set based on the expected output type of the CRC
    for i in range(len(results)):
        # Calculate the CRC32C or CRC64 of the suffix starting at offset i.
        results[i] = rc.CalculateCRC(data[i:])

    # What's the average hash value? It should be roughly half the range of the
    # result dtype if the hash behaves uniformly.
    avg_hash = results.mean()
    uniform_mean = float(
        np.iinfo(results.dtype).max
    ) / 2.0  # Mean of a discrete uniform distribution over [0, results.dtype.max]
    avg_hash_accuracy = avg_hash / uniform_mean

    # How far off is the hash avg from the midpoint of the dtype's range?
    # It should only be by a few % (which should be lower / closer to the
    # midpoint as the sample count increases).
    hash_avg_error = abs(1.0 - avg_hash_accuracy)
    assert hash_avg_error < 0.05, \
        f'The mean of the calculated hash values differs from the expected mean {np.dtype(results.dtype).name}.max/2.0 by {hash_avg_error * 100}%.'

    # Check the variance too to see if it's reasonably close to
    # what we'd expect from a uniform distribution.
    hash_var = results.astype(np.float64).var()  # This is population variance (not sample var), but it's good enough for this test.
    uniform_variance = (float(np.iinfo(np.uint64).max)**2 - 1) / 12.0
    hash_var_accuracy = hash_var / uniform_variance
    hash_var_error = abs(1.0 - hash_var_accuracy)
    assert hash_var_error < 0.10, \
        f'The variance of the calculated hash values differs from the expected variance by {hash_var_error * 100}%.'
def _FUNNEL_ALL(cls, func, *args, **kwargs):
    """
    Funnel point for ufunc calls: invoke ``func`` and, when debug mode is on,
    time the call and record details about its arguments and result into the
    class-level ledger lists.

    Parameters
    ----------
    func : callable
        The underlying function to invoke.
    *args, **kwargs
        Forwarded unchanged to ``func``.

    Returns
    -------
    The result of ``func(*args, **kwargs)``.

    Notes
    -----
    FIX: the six ``try``/``except`` probes below previously used bare
    ``except:`` clauses, which also swallow ``KeyboardInterrupt`` and
    ``SystemExit``; they now catch ``Exception`` only.
    """
    if cls.DebugUFunc is False:
        return func(*args, **kwargs)

    #############################################
    # We are in debug mode below, time and record
    #############################################
    startTime = GetNanoTime()
    result = func(*args, **kwargs)
    delta = (GetNanoTime() - startTime) / 1000000000.0
    cls.TotalTime += delta
    cls.TotalOperations += 1
    deltaTime = float("{0:.9f}".format(delta))

    # add to the ledger
    # cls.UFuncLedger.append(f"{func.__name__}\t{cls.TotalOperations}\t{deltaTime}\t{args}\t{kwargs}")

    # check if CRC option set; only ndarray results can be CRC'd
    if cls.DOCRC and isinstance(result, np.ndarray):
        crc = rc.CalculateCRC(result)
        cls.UCRC.append(crc)
    else:
        cls.UCRC.append(0)

    # sub name for math functions (second positional arg is the operation code)
    if func == rc.BasicMathTwoInputs or func == rc.BasicMathOneInput:
        cls.USubName.append(MATH_OPERATION(args[1]).name)
    else:
        cls.USubName.append("")

    # cls.UFuncLedger.append(ledger)
    cls.UFuncName.append(func.__name__)
    cls.UDeltaTime.append(deltaTime)

    if len(args) > 0:
        args0 = args[0]
        if isinstance(args0, tuple):
            args0 = args0[0]
        cls.UArgs1.append(f"{args0}")
        # Record the dtype when args0 is array-like, else its Python type.
        try:
            cls.UArgs1Type.append(f"{args0.dtype}")
        except Exception:
            cls.UArgs1Type.append(f"{type(args0)}")
        # Record the length when args0 is sized, else 0.
        try:
            cls.UArgs1Len.append(len(args0))
        except Exception:
            cls.UArgs1Len.append(0)
        if len(args) > 1:
            cls.UArgs2.append(f"{args[1]}")
            if len(args) > 2:
                cls.UArgs3.append(f"{args[2]}")
            else:
                cls.UArgs3.append("")
        else:
            cls.UArgs2.append("")
            cls.UArgs3.append("")
    else:
        # No positional args: pad every per-arg column so rows stay aligned.
        cls.UArgs1Type.append(f"{type(None)}")
        cls.UArgs1Len.append(0)
        cls.UArgs1.append("")
        cls.UArgs2.append("")
        cls.UArgs3.append("")

    cls.UKwargs.append(f"{kwargs}")
    cls.UArgLen.append(len(args))

    if result is not None:
        r0 = result
        if isinstance(r0, tuple):
            r0 = r0[0]
        cls.UResult.append(f"{r0}")
        try:
            cls.UResultLen.append(len(r0))
        except Exception:
            cls.UResultLen.append(0)
        try:
            cls.UResultType.append(f"{r0.dtype}")
        except Exception:
            cls.UResultType.append(f"{type(r0)}")
    else:
        # nothing was returned
        cls.UResult.append("None")
        cls.UResultLen.append(0)
        cls.UResultType.append("")
    return result
def test_disallow_noncontig(self) -> None:
    """Test that rc.CalculateCRC raises an error when calling with a non-contiguous array."""
    base = np.arange(100)
    strided_view = base[::2]  # step slicing makes the view non-contiguous
    with pytest.raises(ValueError):
        rc.CalculateCRC(strided_view)
def test_raises_on_bad_arg_type(self) -> None:
    """Test that rc.CalculateCRC raises an error when called with the wrong argument type."""
    raw = b'abcdefghi'  # a bytes object, not an ndarray
    with pytest.raises(TypeError):
        rc.CalculateCRC(raw)