def test_overlap(self):
    """Two identical single-measurement distributions overlap completely."""
    first = distributions.Distribution(
        measurements=[value.ValueUncertainty()])
    second = distributions.Distribution(
        measurements=[value.ValueUncertainty()])
    result = distributions.distributions_ops.overlap(first, second)
    self.assertAlmostEqual(result, 1.0)
def windowed_smooth(measurements, size=1, population_size=512):
    """
    Returns a new list of measurements with the same length as the source
    measurements, where each value in the result is calculated as the
    median and weighted MAD of the nearest +/- size measurements and the
    measurement itself.

    For example, the resulting value for measurement X10 with a size of 2
    would be the median +/- the weighted MAD of the measurements
    (X8, X9, X10, X11, X12).

    Edge conditions are handled so that they are smoothed with partial
    windows.

    :param measurements: Source measurements to smooth.
    :param size: The extent of the smoothing window on each side of a
        measurement.
    :param population_size: Number of samples drawn per measurement when
        building each windowed population.
    :return: List of smoothed ValueUncertainty instances with the same
        length as the source measurements.
    """
    if not measurements:
        return []

    window = []
    window_populations = []

    # Seed the window with the leading partial window: the first
    # measurement plus up to `size` trailing neighbors. Clamp to the input
    # length so inputs shorter than size + 1 don't raise an IndexError
    # (fixes the unclamped `size + 1` fill in the original).
    initial_count = min(size + 1, len(measurements))
    while len(window) < initial_count:
        m = measurements[len(window)]
        window.append(m)
        window_populations.append(
            mdists.population(
                mdists.Distribution([m]),
                count=population_size))

    out = []
    for i in range(len(measurements)):
        # Pool the populations of every measurement in the current window
        # and summarize the pool as median +/- weighted MAD.
        pop_combined = []
        for p in window_populations:
            pop_combined += p

        out.append(
            ValueUncertainty(
                mdists.percentile(pop_combined),
                mdists.weighted_median_average_deviation(pop_combined)))

        append_index = i + size + 1

        if append_index >= len(measurements):
            # Nothing left to add on the right; shrink from the left to
            # produce the trailing partial windows.
            window.pop(0)
            window_populations.pop(0)
            continue

        m = measurements[append_index]
        d = mdists.Distribution([m])
        # Use the keyword form consistently (the original mixed positional
        # and keyword `count` arguments).
        pop = mdists.population(d, count=population_size)
        window.append(m)
        window_populations.append(pop)

        # Keep the window centered with at most `size` neighbors per side.
        while len(window) > (2 * size + 1):
            window.pop(0)
            window_populations.pop(0)

    return out
def test_compareAgainstGaussian3(self):
    """A default gaussian barely overlaps a distant three-point cluster."""
    baseline = distributions.Distribution(
        measurements=[value.ValueUncertainty()])
    cluster = distributions.Distribution(measurements=[
        value.ValueUncertainty(5.0),
        value.ValueUncertainty(8.0),
        value.ValueUncertainty(10.0, 2.0),
    ])

    compute_overlap = distributions.distributions_ops.overlap
    self.assertGreaterEqual(compute_overlap(baseline, cluster), 0.0)
    self.assertLess(compute_overlap(baseline, cluster), 0.06)
def test_getAdaptiveRangeMulti(self):
    """Smoke test: adaptive_range runs on a two-measurement distribution."""
    dist = distributions.Distribution(measurements=[
        value.ValueUncertainty(),
        value.ValueUncertainty(2.0, 0.5),
    ])
    result = distributions.distributions_ops.adaptive_range(dist, 10.0)
    # NOTE(review): `result` is never asserted — this only verifies the
    # call completes without raising.
def test_tukey_box(self):
    """Smoke test: Tukey boxes compute on six default measurements."""
    measurements = [value.ValueUncertainty() for _ in range(6)]
    dist = distributions.Distribution(measurements=measurements)

    unweighted = boxes.unweighted_tukey(dist)
    weighted = boxes.weighted_tukey(dist)
    # NOTE(review): results are never asserted — this only verifies the
    # calls complete without raising.
def test_doubleDensityOverlap(self):
    """
    Two overlapping measurement values should have a total probability
    of unity.
    """
    xs = np.linspace(-10.0, 10.0, 100)
    dist = distributions.Distribution(measurements=[
        value.ValueUncertainty(),
        value.ValueUncertainty(),
    ])
    area = get_area_under_curve(xs, dist.probabilities_at(xs))
    self.assertAlmostEqual(area, 1.0, 3)
def test_singleDensityMedian(self):
    """
    The median of a single measurement should be at the value of that
    measurement.
    """
    for _ in range(10):
        m = value.ValueUncertainty.create_random(-100, 100)
        dist = distributions.Distribution(measurements=[m])
        median = distributions.distributions_ops.percentile(dist)
        self.assertAlmostEqual(median, m.value, delta=0.1)
def test_percentiles(self):
    """
    The sampled median of 400 random measurements should land near zero
    (assumes create_random's defaults are centered on 0, which the
    assertion tolerance reflects).
    """
    measurements = [
        value.ValueUncertainty.create_random()
        for _ in range(400)
    ]
    dist = distributions.Distribution(measurements)
    median = distributions.distributions_ops.percentile(dist, count=6000)
    self.assertAlmostEqual(0, median, delta=0.075)
def test_generalizedGetMedian(self):
    """Smoke test: percentile runs on mixed random distributions."""
    for _ in range(10):
        samples = [
            value.ValueUncertainty.create_random(-100.0, -5.0),
            value.ValueUncertainty.create_random(-500.0, -20.0),
            value.ValueUncertainty.create_random(-100.0, -50.0),
            value.ValueUncertainty.create_random(),
            value.ValueUncertainty.create_random(),
            value.ValueUncertainty.create_random(),
        ]
        dist = distributions.Distribution(measurements=samples)
        result = distributions.distributions_ops.percentile(dist)
        # NOTE(review): `result` is never asserted — this only verifies
        # the call completes without raising.
def test_doubleDensityOffset(self):
    """
    Two measurements with different values and uncertainties should
    still result in a total probability of unity.
    """
    xs = np.linspace(-10.0, 25.0, 100)
    dist = distributions.Distribution(measurements=[
        value.ValueUncertainty(),
        value.ValueUncertainty(2.0, 2.0),
    ])
    area = get_area_under_curve(xs, dist.probabilities_at(xs))
    self.assertAlmostEqual(area, 1.0, 3)
def box_smooth(measurements, size=2, population_size=512):
    """
    Smooths measurements in non-overlapping boxes of `size` consecutive
    measurements: every measurement in a box is replaced by that box's
    median +/- weighted MAD.

    :param measurements: Source measurements to smooth.
    :param size: Number of consecutive measurements per box.
    :param population_size: Number of samples drawn when building each
        box's population.
    :return: List of smoothed ValueUncertainty instances with the same
        length as the source measurements.
    """
    out = []
    count = len(measurements)

    for i in range(0, count, size):
        d = mdists.Distribution(measurements[i:(i + size)])
        pop = mdists.population(d, count=population_size)
        median = mdists.percentile(pop)
        mad = mdists.weighted_median_average_deviation(pop)

        # Clamp the fill target to the input length so a trailing partial
        # box does not pad the output past len(measurements) (the original
        # unconditionally filled to i + size, producing extra entries).
        while len(out) < min(i + size, count):
            out.append(ValueUncertainty(median, mad))

    return out
def test_weighted_mad(self):
    """Symmetric +/-delta measurements give median ~0 and MAD ~delta."""
    delta = 1.8
    measurements = []
    for _ in range(10):
        measurements += [
            value.ValueUncertainty(delta, 1.1),
            value.ValueUncertainty(-delta, 1.1),
        ]
    dist = distributions.Distribution(measurements)

    median = distributions.distributions_ops.percentile(dist, 0.5)
    mad = distributions.distributions_ops.weighted_median_average_deviation(
        dist)

    self.assertAlmostEqual(median, 0, places=1)
    self.assertAlmostEqual(
        mad,
        delta,
        delta=0.5,
        msg='Median: {}'.format(median))
def test_getAdaptiveRange(self):
    """Smoke test: adaptive_range runs on a single-measurement distribution."""
    source = distributions.Distribution(
        measurements=[value.ValueUncertainty()])
    result = distributions.distributions_ops.adaptive_range(source, 10.0)
    # NOTE(review): `result` is never asserted — this only verifies the
    # call completes without raising.
def test_compareAgainstGaussian2(self):
    """A narrower gaussian overlaps the default one by less than 0.7."""
    wide = distributions.Distribution(
        measurements=[value.ValueUncertainty()])
    narrow = distributions.Distribution(
        measurements=[value.ValueUncertainty(uncertainty=0.5)])
    overlap_amount = distributions.distributions_ops.overlap(wide, narrow)
    self.assertLess(overlap_amount, 0.7)