def normalize(self, scale: Scale):
    """Return a copy of this condition with min/max mapped onto [0, 1] via *scale*.

    Open endpoints (``None``) are left as ``None``; ``p`` and ``weight``
    carry over unchanged.
    """
    def _norm(endpoint):
        # None marks an open bound; it passes through untouched.
        if endpoint is None:
            return None
        return scale.normalize_point(endpoint)

    return self.__class__(self.p, _norm(self.min), _norm(self.max), self.weight)
def test_cdf(xscale: Scale):
    """Logistic.cdf must agree with scipy's logistic CDF both on the
    normalized [0, 1] domain and on the true (denormalized) domain."""
    reference_normed = scipy.stats.logistic(0.5, 0.05)
    loc_true = xscale.denormalize_point(0.5)
    s_true = 0.05 * xscale.width
    ergodist = Logistic(loc=loc_true, s=s_true, scale=xscale)

    # Normalized-domain check: denormalize the query point for ergo.
    for q in np.linspace(0, 1, 10):
        expected = reference_normed.cdf(q)
        actual = float(ergodist.cdf(xscale.denormalize_point(q)))
        assert expected == pytest.approx(actual, rel=1e-3)

    # TODO: consider a better approach for log scale
    if isinstance(xscale, LogScale):
        # On a log scale, compare through the normalized reference
        # distribution by normalizing the true-domain query point.
        for q in np.linspace(xscale.low, xscale.high, 10):
            expected = reference_normed.cdf(xscale.normalize_point(q))
            assert expected == pytest.approx(float(ergodist.cdf(q)), rel=1e-3)
    else:
        # On a linear scale, a true-domain scipy reference exists directly.
        reference_true = scipy.stats.logistic(loc_true, s_true)
        for q in np.linspace(xscale.low, xscale.high, 10):
            expected = reference_true.cdf(q)
            assert expected == pytest.approx(float(ergodist.cdf(q)), rel=1e-3)
def test_fit_mixture_large(fixed_params):
    """Fitting a two-component mixture to samples drawn from two known
    logistics should approximately recover both components' (normalized)
    locations and scales."""
    xscale = Scale(-2, 3)
    sample_a = onp.random.logistic(loc=0.7, scale=0.1, size=1000)
    sample_b = onp.random.logistic(loc=0.4, scale=0.2, size=1000)
    samples = onp.concatenate([sample_a, sample_b])
    mixture = LogisticMixture.from_samples(
        data=samples,
        fixed_params=fixed_params,
        scale=xscale,
    )
    # FIXME: What's going on below with scales?
    # Sort by loc so components pair up deterministically with expectations.
    fitted = sorted(
        (component.base_dist.loc, component.base_dist.s)
        for component in mixture.components
    )
    (loc_low, s_low), (loc_high, s_high) = fitted
    assert loc_low == pytest.approx(xscale.normalize_point(0.4), abs=0.2)
    assert loc_high == pytest.approx(xscale.normalize_point(0.7), abs=0.2)
    assert s_low == pytest.approx(0.2, abs=0.2)
    assert s_high == pytest.approx(0.1, abs=0.2)
def normalize(self, scale: Scale):
    """Return a copy of this condition with the mean rescaled to [0, 1]."""
    mean_on_unit = scale.normalize_point(self.mean)
    return self.__class__(mean_on_unit, self.weight)
def normalize(self, scale: Scale):
    """Return a copy of this condition with the outcome rescaled to [0, 1]."""
    outcome_on_unit = scale.normalize_point(self.outcome)
    return self.__class__(outcome_on_unit, self.weight)
def normalize(self, scale: Scale):
    """Map ``xs`` onto [0, 1] and rescale ``densities`` accordingly.

    Compressing the x-axis by a factor of ``scale.width`` stretches the
    densities by the same factor (change of variables), so total mass is
    preserved on the normalized domain.
    """
    xs_unit = np.array([scale.normalize_point(point) for point in self.xs])
    densities_unit = np.array([d * scale.width for d in self.densities])
    return self.__class__(xs_unit, densities_unit, self.weight)
class Logistic(Distribution):
    """Logistic distribution whose parameters are stored normalized to the
    [0, 1] domain, with an attached :class:`Scale` used to convert between
    the normalized and true domains."""

    loc: float  # normalized
    s: float  # normalized
    scale: Scale
    metadata: Any = None

    def __init__(
        self,
        loc: float,
        s: float,
        scale: Optional[Scale] = None,
        metadata=None,
        normalized=False,
    ):
        """Build from either normalized or true-scale parameters.

        :param loc: location; interpreted as normalized iff ``normalized``
        :param s: logistic scale parameter (same convention as ``loc``)
        :param scale: the true scale; required when ``normalized`` is False,
            defaults to Scale(0, 1) when ``normalized`` is True
        :param metadata: opaque caller-supplied data, echoed in ``repr``
        :param normalized: whether ``(loc, s)`` are already on [0, 1]
        :raises ValueError: if ``normalized`` is False and ``scale`` is None
        """
        # TODO (#303): Raise ValueError on scale < 0
        if normalized:
            self.loc = loc
            # Clamp s away from zero so pdf/logpdf (which divide by s /
            # take log(s)) stay finite.
            self.s = np.max([s, 0.0000001])
            self.metadata = metadata
            if scale is not None:
                self.scale = scale
            else:
                self.scale = Scale(0, 1)
            self.true_s = self.s * self.scale.width
            self.true_loc = self.scale.denormalize_point(loc)
        elif scale is None:
            raise ValueError("Either a Scale or normalized parameters are required")
        else:
            # True-scale parameters: normalize before storing.
            self.loc = scale.normalize_point(loc)
            self.s = np.max([s, 0.0000001]) / scale.width
            self.scale = scale
            self.metadata = metadata
            self.true_s = s  # convenience field only used in repr currently
            self.true_loc = loc  # convenience field only used in repr currently

    def __repr__(self):
        return (
            f"Logistic(scale={self.scale}, true_loc={self.true_loc}, "
            f"true_s={self.true_s}, normed_loc={self.loc}, normed_s={self.s},"
            f" metadata={self.metadata})"
        )

    # Distribution

    def pdf(self, x):
        """Density at true-scale point ``x``: evaluate in normalized space,
        then divide by the scale width (change of variables)."""
        y = (self.scale.normalize_point(x) - self.loc) / self.s
        p = np.exp(scipy.stats.logistic.logpdf(y) - np.log(self.s))
        return p / self.scale.width

    def logpdf(self, x):
        """Log-density at true-scale point ``x`` (log of :meth:`pdf`)."""
        y = (self.scale.normalize_point(x) - self.loc) / self.s
        logp = scipy.stats.logistic.logpdf(y) - np.log(self.s)
        return logp - np.log(self.scale.width)

    def cdf(self, x):
        """CDF at true-scale point ``x``; CDF values are invariant under the
        affine (de)normalization, so no width correction is needed."""
        y = (self.scale.normalize_point(x) - self.loc) / self.s
        return scipy.stats.logistic.cdf(y)

    def ppf(self, q):
        """Quantile function: normalized-space quantile, denormalized.

        NOTE(review): uses ``oscipy`` here rather than the ``scipy`` used in
        pdf/cdf — presumably to get a concrete (non-traced) value; confirm.
        """
        return self.scale.denormalize_point(
            oscipy.stats.logistic(loc=self.loc, scale=self.s).ppf(q)
        )

    def sample(self):
        """Draw one sample in normalized space and denormalize it."""
        return self.scale.denormalize_point(
            oscipy.stats.logistic.rvs(loc=self.loc, scale=self.s)
        )

    # Scaled

    def normalize(self):
        """
        Return the normalized condition.

        :return: the condition normalized to [0,1]
        """
        return self.__class__(
            self.loc, self.s, Scale(0, 1), self.metadata, normalized=True
        )

    def denormalize(self, scale: Scale):
        """
        Assume that the distribution has been normalized to be over [0,1].
        Return the distribution on the true scale.

        :param scale: the true scale
        """
        return self.__class__(self.loc, self.s, scale, self.metadata, normalized=True)

    # Structured

    @classmethod
    def structure(self, params):
        # Inverse of destructure(): rebuild the Scale from its
        # (classes, numeric) encoding, then reconstruct the Logistic from
        # its normalized (loc, s).
        class_params, numeric_params = params
        self_class, scale_classes = class_params
        self_numeric, scale_numeric = numeric_params
        scale = scale_classes[0].structure((scale_classes, scale_numeric))
        return self_class(
            loc=self_numeric[0], s=self_numeric[1], scale=scale, normalized=True
        )

    def destructure(self):
        # Split into static (class) and numeric parts, pairing with
        # structure() above for reconstruction.
        scale_classes, scale_numeric = self.scale.destructure()
        class_params = (self.__class__, scale_classes)
        self_numeric = (self.loc, self.s)
        numeric_params = (self_numeric, scale_numeric)
        return (class_params, numeric_params)