#print(np.mean(a, axis=0)) result = np.mean(a, axis=0) ref_result = [245.33333333, 246.33333333, 247.33333333] for p, q in zip(list(result), ref_result): print(math.isclose(p, q, rel_tol=1e-06, abs_tol=1e-06)) #print(np.mean(a, axis=1)) result = np.mean(a, axis=1) ref_result = [253., 238., 248.] for p, q in zip(list(result), ref_result): print(math.isclose(p, q, rel_tol=1e-06, abs_tol=1e-06)) print("Testing np.std:") a = np.array([253, 254, 255], dtype=np.uint8) #print(np.std(a)) print(math.isclose(np.std(a), 0.816496580927726, rel_tol=1e-06, abs_tol=1e-06)) print( math.isclose(np.std(a, axis=0), 0.816496580927726, rel_tol=1e-06, abs_tol=1e-06)) a = np.array([range(255 - 3, 255), range(240 - 3, 240), range(250 - 3, 250)], dtype=np.float) #print(np.std(a)) print(math.isclose(np.std(a), 6.289320754704403, rel_tol=1e-06, abs_tol=1e-06)) #print(np.std(a, axis=0)) result = np.std(a, axis=0) ref_result = [6.23609564, 6.23609564, 6.23609564] for p, q in zip(list(result), ref_result):
# Ulab is a numpy-like module for micropython, meant to simplify and speed up common # mathematical operations on arrays. This basic example shows mean/std on an image. # # NOTE: ndarrays cause the heap to be fragmented easily. If you run out of memory, # there's not much that can be done about it, lowering the resolution might help. import sensor, image, time from ulab import numpy as np sensor.reset() # Reset and initialize the sensor. sensor.set_pixformat( sensor.GRAYSCALE) # Set pixel format to RGB565 (or GRAYSCALE) sensor.set_framesize(sensor.QQVGA) # Set frame size to QVGA (320x240) clock = time.clock() # Create a clock object to track the FPS. while (True): img = sensor.snapshot() # Take a picture and return the image. a = np.array(img, dtype=np.uint8) print("mean: %d std:%d" % (np.mean(a), np.std(a)))
def corr(X, Y):
    """Pearson correlation coefficient of two equal-length 1-D vectors.

    Computed as the biased covariance of X and Y divided by the product
    of their (population) standard deviations.
    """
    assert X.shape == Y.shape and len(X.shape) == 1, "Expected data vectors of equal length"
    assert len(X) > 1, "At least 2 data points are required"
    denominator = np.std(X) * np.std(Y)
    return cov(X, Y, biased=True) / denominator
def normalized_rms_ulab(values):
    """RMS of *values* after subtracting the mean (works with ndarrays only)."""
    centered = values - np.mean(values)
    return math.sqrt(np.sum(centered * centered) / len(values))


# Instead of using sensor data, we generate some data
# The amplitude is 5000 so the rms should be around 5000/1.414 = 3536
nums_list = [int(8000 + math.sin(i) * 5000) for i in range(100)]
nums_array = np.array(nums_list)


def timeit(s, f, n=100):
    """Call f() n times and print the average wall-clock time per call (ms)
    together with the result of the final call."""
    start = time.monotonic_ns()
    for _ in range(n):
        x = f()
    per_call_ms = (time.monotonic_ns() - start) * 1e-6 / n
    print("%-30s : %8.3fms [result=%f]" % (s, per_call_ms, x))


print("Computing the RMS value of 100 numbers")
timeit("traditional", lambda: normalized_rms(nums_list))
timeit("ulab, with ndarray, some implementation in python", lambda: normalized_rms_ulab(nums_array))
timeit("ulab only, with list", lambda: np.std(nums_list))
timeit("ulab only, with ndarray", lambda: np.std(nums_array))
def standardise(X):
    """Standardise a 2-D array along its longer axis.

    Each vector taken along the longer axis is shifted to zero mean and
    scaled to unit (population) standard deviation.

    Bug fix: the original always reshaped mu/sigma to a column
    (minor_shape, 1), which only broadcasts when the reduction axis is 1.
    For tall matrices (axis == 0) that raised a broadcast error, and for
    square matrices it silently standardised the wrong orientation.
    """
    axis = np.argmax(X.shape)
    minor_shape = np.min(X.shape)
    mu = np.mean(X, axis=axis)
    sigma = np.std(X, axis=axis)
    if axis == 0:
        # Reducing over rows leaves one value per column: broadcast as a row.
        shape = (1, minor_shape)
    else:
        # Reducing over columns leaves one value per row: broadcast as a column.
        shape = (minor_shape, 1)
    return (X - mu.reshape(shape)) / sigma.reshape(shape)