def test_periodogram(self):
    """spectral.spectrum must equal the squared magnitude of np.fft.rfft."""
    for samples in generate_inputs(self.__number_inputs, self.__minimum_size, self.__maximum_size):
        scaled = samples * 10
        expected = np.abs(np.fft.rfft(scaled)) ** 2
        actual = spectral.spectrum(scaled)
        np.testing.assert_array_almost_equal(actual, expected, 3)
def test_binary_search(self):
    """algorithm.binary_search on sorted data must agree with list.index."""
    for raw in generate_inputs(self.__number_inputs, self.__minimum_size, self.__maximum_size):
        ordered = np.sort(raw)
        target = random.choice(ordered)
        expected = list(ordered).index(target)
        self.assertAlmostEqual(algorithm.binary_search(ordered, target), expected)
def test_moment(self):
    """statistics.moment must match scipy.stats.mstats.moment for orders 2..7."""
    for samples in generate_inputs(self.__number_inputs, self.__minimum_size, self.__maximum_size):
        for order in range(2, 8):
            self.assertAlmostEqual(statistics.moment(samples, order),
                                   scipy.stats.mstats.moment(samples, order))
def test_encoder(self):
    """Round-trip check: encode random audio to a WAV file, decode it back,
    and compare samples, frame count, sample rate and channel count.

    Fix: the temporary file is now removed in a ``finally`` block, so a
    failing assertion anywhere in the loop no longer leaks WAV files into
    the test repository.
    """
    repository, _ = utility.get_list_test_files()
    number_inputs = 10
    minimum_size = 1 << 10
    maximum_size = 1 << 20
    sr = [8000, 11025, 22050, 32000, 44100, 48000]
    for data in utility.generate_inputs(number_inputs, minimum_size, maximum_size):
        samplerate = random.choice(sr)
        encoder = io.Encoder(samplerate, 1)
        self.assertEqual(samplerate, encoder.samplerate())
        self.assertEqual(1, encoder.channels())
        filename = str(uuid.uuid4()) + ".wav"
        f = os.path.join(repository, filename)
        try:
            encoder.open(f)
            self.assertTrue(encoder.is_open())
            counter = encoder.write(data.astype(np.float32))
            self.assertEqual(data.size, counter)
            encoder.close()
            decoder = io.Decoder()
            decoder.open(f)
            self.assertEqual(len(data), decoder.frames())
            self.assertEqual(samplerate, decoder.samplerate())
            self.assertEqual(1, decoder.channels())
            recovery = decoder.read(len(data))
            np.testing.assert_array_almost_equal(recovery, data)
            decoder.close()
        finally:
            # Best-effort cleanup: the file may not exist if open() failed.
            if os.path.exists(f):
                os.remove(f)
def test_clip(self):
    """algorithm.clip must behave like np.clip for randomly chosen bounds."""
    for samples in generate_inputs(self.__number_inputs, self.__minimum_size, self.__maximum_size):
        lower = randint(self.__minimum_size, self.__maximum_size)
        upper = randint(lower, self.__maximum_size)
        actual = algorithm.clip(samples, lower, upper)
        expected = np.clip(samples, lower, upper)
        np.testing.assert_array_almost_equal(actual, expected)
def test_crest(self):
    """statistics.crest must match the locally computed crest factor."""
    for samples in utility.generate_inputs(self.__number_inputs, self.__minimum_size, self.__maximum_size):
        self.assertAlmostEqual(statistics.crest(samples), self.__compute_crest(samples))
def test_scale(self):
    """algorithm.scale must multiply the signal by the given factor."""
    for samples in generate_inputs(self.__number_inputs, self.__minimum_size, self.__maximum_size):
        gain = randint(0, 5)
        expected = samples * gain
        np.testing.assert_array_almost_equal(algorithm.scale(samples, gain), expected)
def test_real_complex_fft(self):
    """ifft(fft(x)) must reconstruct the complex-cast input signal."""
    for samples in generate_inputs(self.__number_inputs, self.__minimum_size, self.__maximum_size):
        as_complex = samples.astype(np.complex128)
        roundtrip = spectral.ifft(spectral.fft(as_complex))
        np.testing.assert_array_almost_equal(roundtrip, as_complex)
def test_real_ifft(self):
    """spectral.irfft must invert spectral.rfft the way np.fft.irfft does."""
    for samples in generate_inputs(self.__number_inputs, self.__minimum_size, self.__maximum_size):
        half_spectrum = spectral.rfft(samples)
        actual = spectral.irfft(half_spectrum)
        expected = np.fft.irfft(half_spectrum)
        np.testing.assert_array_almost_equal(actual, expected)
def test_entropy(self):
    """statistics.entropy must match the locally computed reference.

    Fix: removed an unused function-local ``import math`` that was never
    referenced anywhere in the body.
    """
    for data in utility.generate_inputs(self.__number_inputs, self.__minimum_size, self.__maximum_size):
        generated = statistics.entropy(data)
        reference = self.__compute_entropy(data)
        self.assertAlmostEqual(generated, reference)
def test_hartley(self):
    """Hartley transform equals Re(rfft) - Im(rfft) on the half spectrum."""
    for samples in generate_inputs(self.__number_inputs, self.__minimum_size, self.__maximum_size):
        dht = spectral.hartley(samples)
        half_spectrum = spectral.rfft(samples)
        expected = np.real(half_spectrum) - np.imag(half_spectrum)
        np.testing.assert_array_almost_equal(dht[:len(expected)], expected)
def test_concatenate(self):
    """algorithm.concatenate must match np.concatenate on random pairs."""
    pool = generate_inputs(self.__number_inputs, self.__minimum_size, self.__maximum_size)
    for _ in range(len(pool)):
        lhs = pool[randint(0, len(pool) - 1)]
        rhs = pool[randint(0, len(pool) - 1)]
        actual = algorithm.concatenate(lhs, rhs)
        expected = np.concatenate((lhs, rhs))
        np.testing.assert_array_almost_equal(actual, expected)
def test_equal(self):
    """algorithm.equal: true for an exact copy, false after random edits."""
    for samples in generate_inputs(self.__number_inputs, self.__minimum_size, self.__maximum_size):
        clone = np.copy(samples)
        self.assertTrue(algorithm.equal(samples, clone))
        # Double a random subset of entries so the copy differs.
        count = random.randint(1, len(samples) - 1)
        positions = random.sample(range(0, len(samples) - 1), count)
        clone[positions] = clone[positions] * 2
        self.assertFalse(algorithm.equal(samples, clone))
def test_min(self):
    """statistics.min / min_abs must match Python's builtin min variants."""
    for samples in generate_inputs(self.__number_inputs, self.__minimum_size, self.__maximum_size):
        self.assertAlmostEqual(statistics.min(samples), min(samples))
        self.assertAlmostEqual(statistics.min_abs(samples), min(samples, key=abs))
def test_peak(self):
    """peak/peak_abs must return the argmax index of the (absolute) signal."""
    for samples in generate_inputs(self.__number_inputs, self.__minimum_size, self.__maximum_size):
        position, _ = statistics.peak(samples)
        self.assertAlmostEqual(position, np.argmax(samples))
        position, _ = statistics.peak_abs(samples)
        self.assertAlmostEqual(position, np.argmax(np.abs(samples)))
def test_index_of(self):
    """algorithm.index_of must agree with list.index for a sampled value."""
    for samples in generate_inputs(self.__number_inputs, self.__minimum_size, self.__maximum_size):
        target = random.choice(samples)
        expected = list(samples).index(target)
        self.assertAlmostEqual(algorithm.index_of(samples, target), expected)
def test_idct(self):
    """idct(dct(x)) must reconstruct the original signal."""
    for samples in generate_inputs(self.__number_inputs, self.__minimum_size, self.__maximum_size):
        roundtrip = spectral.idct(spectral.dct(samples))
        np.testing.assert_array_almost_equal(samples, roundtrip)
def test_dct(self):
    """spectral.dct must match scipy's fftpack.dct."""
    for samples in generate_inputs(self.__number_inputs, self.__minimum_size, self.__maximum_size):
        np.testing.assert_array_almost_equal(spectral.dct(samples), fftpack.dct(samples))
def test_pad(self):
    """algorithm.pad must zero-pad the signal up to the requested size.

    Fix: the original reference used ``size - len(data) % size``, which by
    operator precedence is ``size - (len(data) % size)``; when
    ``size == len(data)`` that pads by ``size`` (total length ``2 * size``)
    instead of by zero. Since ``size`` is drawn from
    ``[len(data), maximum]``, the intended pad amount is simply
    ``size - len(data)``, which agrees with the old formula for every
    ``size > len(data)`` and also handles the ``size == len(data)`` edge.
    """
    for data in generate_inputs(self.__number_inputs, self.__minimum_size, self.__maximum_size):
        size = randint(len(data), self.__maximum_size)
        generated = algorithm.pad(data, size)
        reference = np.pad(data, (0, size - len(data)), mode='constant', constant_values=0)
        np.testing.assert_array_almost_equal(generated, reference)
def test_hilbert(self):
    """spectral.hilbert must match scipy.signal.hilbert."""
    for samples in generate_inputs(self.__number_inputs, self.__minimum_size, self.__maximum_size):
        actual = spectral.hilbert(samples)
        expected = signal.hilbert(samples)
        np.testing.assert_array_almost_equal(actual, expected)
def test_abs(self):
    """algorithm.abs must match np.abs element-wise."""
    for samples in generate_inputs(self.__number_inputs, self.__minimum_size, self.__maximum_size):
        np.testing.assert_array_almost_equal(algorithm.abs(samples), np.abs(samples))
def test_normalize(self):
    """algorithm.normalize must divide by the maximum absolute value."""
    for samples in generate_inputs(self.__number_inputs, self.__minimum_size, self.__maximum_size):
        scale = np.max(np.abs(samples)).item()
        np.testing.assert_array_almost_equal(algorithm.normalize(samples), samples / scale)
def test_skewness(self):
    """statistics.skewness must match scipy.stats.mstats.skew."""
    for samples in generate_inputs(self.__number_inputs, self.__minimum_size, self.__maximum_size):
        expected = scipy.stats.mstats.skew(samples).item()
        self.assertAlmostEqual(statistics.skewness(samples), expected)
def test_std(self):
    """statistics.standard_deviation must match np.std."""
    for samples in generate_inputs(self.__number_inputs, self.__minimum_size, self.__maximum_size):
        expected = np.std(samples).item()
        self.assertAlmostEqual(statistics.standard_deviation(samples), expected)
def test_variance(self):
    """statistics.variance must match np.var."""
    for samples in generate_inputs(self.__number_inputs, self.__minimum_size, self.__maximum_size):
        expected = np.var(samples).item()
        self.assertAlmostEqual(statistics.variance(samples), expected)
def test_kurtosis(self):
    """statistics.kurtosis must match scipy's Pearson (non-Fisher) kurtosis."""
    for samples in generate_inputs(self.__number_inputs, self.__minimum_size, self.__maximum_size):
        expected = scipy.stats.mstats.kurtosis(samples, fisher=False).item()
        self.assertAlmostEqual(statistics.kurtosis(samples), expected)
def test_norm(self):
    """statistics.norm must match numpy.linalg's Euclidean norm."""
    for samples in generate_inputs(self.__number_inputs, self.__minimum_size, self.__maximum_size):
        self.assertAlmostEqual(statistics.norm(samples), LA.norm(samples))
def test_generalized_mean(self):
    """statistics.geometric_mean must match scipy.stats.mstats.gmean."""
    # NOTE(review): the test name says "generalized" but the assertion
    # covers the geometric mean — confirm whether a rename is intended.
    for samples in generate_inputs(self.__number_inputs, self.__minimum_size, self.__maximum_size):
        self.assertAlmostEqual(statistics.geometric_mean(samples),
                               scipy.stats.mstats.gmean(samples))