def __init__(self, alpha, beta, name, learnable=False, has_bias=False,
             is_observed=False, is_policy=False, is_reward=False):
    self._type = "Beta"
    # The (alpha, beta) parametrization is passed on as (concentration1, concentration0);
    # both parameters are constrained to the positive half-line.
    concentration1 = alpha
    concentration0 = beta
    ranges = {"concentration1": geometric_ranges.RightHalfLine(0.),
              "concentration0": geometric_ranges.RightHalfLine(0.)}
    super().__init__(name, concentration1=concentration1, concentration0=concentration0,
                     learnable=learnable, has_bias=has_bias, ranges=ranges,
                     is_observed=is_observed, is_policy=is_policy, is_reward=is_reward)
    self.distribution = distributions.BetaDistribution()
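# Usage sketch (illustrative only): assuming this constructor belongs to a variable
# class exposed by the package, e.g. a hypothetical `BetaVariable`, a symmetric prior
# over a probability could be declared as
#
#     coin_fairness = BetaVariable(alpha=1., beta=1., name="coin_fairness", learnable=True)
#
# The RightHalfLine(0.) ranges declared above keep both concentration parameters positive
# during learning.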
def __init__(self, alpha, beta, name, learnable=False): self._type = "Logit Normal" ranges = { "alpha": geometric_ranges.RightHalfLine(0.), "beta": geometric_ranges.RightHalfLine(0.) } super().__init__(name, alpha=alpha, beta=beta, learnable=learnable, ranges=ranges) self.distribution = distributions.BetaDistribution()
def __init__(self, alpha, beta, name, learnable=False, is_observed=False): self._type = "Logit Normal" concentration1 = alpha concentration0 = beta ranges = { "concentration1": geometric_ranges.RightHalfLine(0.), "concentration0": geometric_ranges.RightHalfLine(0.) } super().__init__(name, concentration1=concentration1, concentration0=concentration0, learnable=learnable, ranges=ranges, is_observed=is_observed) self.distribution = distributions.BetaDistribution()
def __init__(self, loc, scale, name, learnable=False, has_bias=False,
             is_observed=False, is_policy=False, is_reward=False):
    self._type = "Log Normal"
    ranges = {"loc": geometric_ranges.UnboundedRange(),
              "scale": geometric_ranges.RightHalfLine(0.)}
    super().__init__(name, loc=loc, scale=scale, learnable=learnable, has_bias=has_bias,
                     ranges=ranges, is_observed=is_observed, is_policy=is_policy,
                     is_reward=is_reward)
    self.distribution = distributions.LogNormalDistribution()
def __init__(self, df, loc, scale, name, learnable=False, is_observed=False):
    self._type = "StudentT"
    ranges = {"df": geometric_ranges.UnboundedRange(),
              "loc": geometric_ranges.UnboundedRange(),
              "scale": geometric_ranges.RightHalfLine(0.)}
    super().__init__(name, df=df, loc=loc, scale=scale, learnable=learnable,
                     ranges=ranges, is_observed=is_observed)
    self.distribution = distributions.StudentTDistribution()
def __init__(self, concentration, name, learnable=False, has_bias=False,
             is_observed=False, is_policy=False, is_reward=False):
    self._type = "Dirichlet"
    ranges = {"concentration": geometric_ranges.RightHalfLine(0.)}
    super().__init__(name, concentration=concentration, learnable=learnable,
                     has_bias=has_bias, ranges=ranges, is_observed=is_observed,
                     is_policy=is_policy, is_reward=is_reward)
    self.distribution = distributions.DirichletDistribution()
def __init__(self, tau, p, name, learnable=False): self._type = "Concrete" ranges = { "tau": geometric_ranges.RightHalfLine(0.), "p": geometric_ranges.Simplex() } super().__init__(name, tau=tau, p=p, learnable=learnable, ranges=ranges) self.distribution = distributions.ConcreteDistribution()
def __init__(self, loc, scale, name, learnable=False): self._type = "Logit Normal" ranges = { "loc": geometric_ranges.UnboundedRange(), "scale": geometric_ranges.RightHalfLine(0.) } super().__init__(name, loc=loc, scale=scale, learnable=learnable, ranges=ranges) self.distribution = distributions.LogitNormalDistribution()
def __init__(self, mu, sigma, name, learnable=False): self._type = "Logit Normal" ranges = { "mu": geometric_ranges.UnboundedRange(), "sigma": geometric_ranges.RightHalfLine(0.) } super().__init__(name, mu=mu, sigma=sigma, learnable=learnable, ranges=ranges) self.distribution = distributions.LogitNormalDistribution()
def __init__(self, rate, name, learnable=False, has_bias=False, is_observed=False): self._type = "Poisson" ranges = {"rate": geometric_ranges.RightHalfLine(0.)} super().__init__(name, rate=rate, learnable=learnable, has_bias=has_bias, ranges=ranges, is_observed=is_observed) self.distribution = distributions.PoissonDistribution()
def __init__(self, scale, name, learnable=False, has_bias=False, is_observed=False): self._type = "HalfNormal" ranges = {"scale": geometric_ranges.RightHalfLine(0.)} super().__init__(name, scale=scale, learnable=learnable, has_bias=has_bias, ranges=ranges, is_observed=is_observed) self.distribution = distributions.HalfNormalDistribution()
def __init__(self, loc, scale, name, learnable=False, has_bias=False, is_observed=False): self._type = "Cauchy" ranges = { "loc": geometric_ranges.UnboundedRange(), "scale": geometric_ranges.RightHalfLine(0.) } super().__init__(name, loc=loc, scale=scale, learnable=learnable, has_bias=has_bias, ranges=ranges, is_observed=is_observed) self.distribution = distributions.CauchyDistribution()
def __init__(self, mu, cov=None, chol_cov=None, diag_cov=None,
             name="Multivariate Normal", learnable=False):
    self._type = "Multivariate Normal"
    # Exactly one of chol_cov and diag_cov has to be supplied; the cov argument is
    # accepted in the signature but is not used by this constructor.
    if chol_cov is not None and diag_cov is None:
        # Full covariance, parametrized by its Cholesky factor.
        ranges = {"mu": geometric_ranges.UnboundedRange(),
                  "chol_cov": geometric_ranges.UnboundedRange()}
        super().__init__(name, mu=mu, chol_cov=chol_cov, learnable=learnable, ranges=ranges)
        self.distribution = distributions.CholeskyMultivariateNormal()
    elif diag_cov is not None and chol_cov is None:
        # Diagonal covariance: the components are independent, so the variable reduces
        # to a batch of univariate Normal distributions.
        ranges = {"mean": geometric_ranges.UnboundedRange(),
                  "var": geometric_ranges.RightHalfLine(0.)}
        super().__init__(name, mean=mu, var=diag_cov, learnable=learnable, ranges=ranges)
        self.distribution = distributions.NormalDistribution()
    else:
        raise ValueError("Either chol_cov (Cholesky factor of the covariance matrix) or "
                         "diag_cov (diagonal of the covariance matrix) needs to be provided as input")
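# Usage sketch (illustrative only): assuming a wrapping class, e.g. a hypothetical
# `MultivariateNormalVariable`, and that numpy arrays are accepted as parameter values,
# the two supported covariance parametrizations would look like
#
#     import numpy as np
#
#     # Full covariance, supplied through its Cholesky factor:
#     x = MultivariateNormalVariable(mu=np.zeros(3), chol_cov=np.eye(3), name="x")
#
#     # Diagonal covariance, treated as independent Normal components:
#     y = MultivariateNormalVariable(mu=np.zeros(3), diag_cov=np.ones(3), name="y")
#
# Supplying both chol_cov and diag_cov, or neither, raises the ValueError above.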
def __init__(self, rate, name, learnable=False, has_bias=False,
             is_observed=False, is_policy=False, is_reward=False):
    self._type = "Exponential"
    ranges = {"rate": geometric_ranges.RightHalfLine(0.)}
    super().__init__(name, rate=rate, learnable=learnable, has_bias=has_bias,
                     ranges=ranges, is_observed=is_observed, is_policy=is_policy,
                     is_reward=is_reward)
    self.distribution = distributions.ExponentialDistribution()