def __init__(self, distribution, lower, upper, transform="infer", *args, **kwargs):
    """Initialize a bounded wrapper around ``distribution``.

    Bounds are cast to tensors with the configured float dtype. When
    ``transform`` is ``"infer"``, a matching transform (interval /
    lowerbound / upperbound / none) is chosen from which bounds are
    present, together with a default test value inside the region;
    otherwise no default is supplied.
    """
    float_dtype = kwargs.get("dtype", theano.config.floatX)
    # Cast whichever bounds were supplied to tensors of the model dtype.
    if lower is not None:
        lower = tt.as_tensor_variable(lower).astype(float_dtype)
    if upper is not None:
        upper = tt.as_tensor_variable(upper).astype(float_dtype)

    default = None
    if transform == "infer":
        bounded_below = lower is not None
        bounded_above = upper is not None
        if bounded_below and bounded_above:
            transform = transforms.interval(lower, upper)
            default = 0.5 * (lower + upper)
        elif bounded_below:
            transform = transforms.lowerbound(lower)
            default = lower + 1
        elif bounded_above:
            transform = transforms.upperbound(upper)
            default = upper - 1
        else:
            # Unbounded on both sides: nothing to transform.
            transform = None

    super().__init__(
        distribution, lower, upper, default, *args, transform=transform, **kwargs
    )
def __init__(self, distribution, lower, upper, transform='infer', *args, **kwargs):
    """Set up the bounded distribution, inferring transform and default.

    Any supplied bound is converted to a tensor of the configured float
    dtype; ``transform='infer'`` selects interval / lowerbound /
    upperbound (or none) based on which bounds exist and picks a default
    test value strictly inside the bounded region.
    """
    dtype = kwargs.get('dtype', theano.config.floatX)
    # Cast each present bound to a tensor; leave absent bounds as None.
    lower, upper = (
        None if bound is None else tt.as_tensor_variable(bound).astype(dtype)
        for bound in (lower, upper)
    )

    default = None
    if transform == 'infer':
        if lower is None:
            if upper is None:
                transform = None
            else:
                transform = transforms.upperbound(upper)
                default = upper - 1
        else:
            if upper is None:
                transform = transforms.lowerbound(lower)
                default = lower + 1
            else:
                transform = transforms.interval(lower, upper)
                default = 0.5 * (lower + upper)

    super(_ContinuousBounded, self).__init__(
        distribution=distribution, lower=lower, upper=upper,
        transform=transform, default=default, *args, **kwargs)
def __init__(self, distribution, lower, upper, transform='infer', *args, **kwargs):
    """Wrap an existing distribution class with (lower, upper) bounds.

    Builds the underlying distribution, copies its attributes onto this
    instance, and — when ``transform='infer'`` — selects an interval /
    lowerbound / upperbound transform based on which bounds are finite,
    moving the test value inside the bounds if needed.
    """
    # Imported locally, presumably to avoid a circular import — TODO confirm.
    import pymc3.distributions.transforms as transforms
    self.dist = distribution.dist(*args, **kwargs)
    # Mirror the wrapped distribution's attributes on this wrapper.
    self.__dict__.update(self.dist.__dict__)
    # NOTE(review): stores every local (distribution, lower, upper, transform,
    # args, kwargs, self) as instance attributes. Renaming or adding locals
    # above this point would change the stored attributes — keep as-is.
    self.__dict__.update(locals())
    if hasattr(self.dist, 'mode'):
        self.mode = self.dist.mode
    if transform == 'infer':
        default = self.dist.default()
        # Both bounds finite: interval transform; recenter the test value
        # if the wrapped distribution's default falls outside the bounds.
        if not np.isinf(lower) and not np.isinf(upper):
            self.transform = transforms.interval(lower, upper)
            if default <= lower or default >= upper:
                self.testval = 0.5 * (upper + lower)
        # Only the lower bound finite.
        if not np.isinf(lower) and np.isinf(upper):
            self.transform = transforms.lowerbound(lower)
            if default <= lower:
                self.testval = lower + 1
        # Only the upper bound finite.
        if np.isinf(lower) and not np.isinf(upper):
            self.transform = transforms.upperbound(upper)
            if default >= upper:
                self.testval = upper - 1
    # Discrete variables are sampled untransformed.
    # NOTE(review): original nesting lost in formatting; placed at method
    # level (overriding any inferred transform) — confirm against history.
    if issubclass(distribution, Discrete):
        self.transform = None
def __init__(self, distribution, lower, upper, transform="infer", *args, **kwargs):
    """Initialize the bounded distribution wrapper.

    Bounds are converted via ``floatX`` into tensors. With
    ``transform="infer"`` the transform and a default test value are
    derived from which bounds are present; with an explicit transform no
    default is chosen.
    """
    # Convert given bounds to floatX tensors, leaving missing ones as None.
    if lower is not None:
        lower = tt.as_tensor_variable(floatX(lower))
    if upper is not None:
        upper = tt.as_tensor_variable(floatX(upper))

    default = None
    if transform == "infer":
        # Dispatch on (has lower, has upper).
        presence = (lower is not None, upper is not None)
        if presence == (True, True):
            transform = transforms.interval(lower, upper)
            default = 0.5 * (lower + upper)
        elif presence == (True, False):
            transform = transforms.lowerbound(lower)
            default = lower + 1
        elif presence == (False, True):
            transform = transforms.upperbound(upper)
            default = upper - 1
        else:
            transform = None

    super().__init__(distribution, lower, upper, default, *args,
                     transform=transform, **kwargs)
def test_interval():
    """tr.interval maps onto (lo, hi) with a consistent Jacobian determinant."""
    for lo, hi in [(-4, 5.5), (0.1, 0.7), (-10, 4.3)]:
        # Scale/shift the unit domain into the target interval.
        width = np.float64(hi - lo)
        domain = Unit * width + np.float64(lo)
        trans = tr.interval(lo, hi)
        check_transform(trans, domain)
        check_jacobian_det(trans, domain, elemwise=True)
        # Every transformed value must land strictly inside (lo, hi).
        vals = get_values(trans)
        close_to_logical(vals > lo, True, tol)
        close_to_logical(vals < hi, True, tol)
def test_interval():
    """tr.interval round-trips values and keeps them strictly inside (lo, hi)."""
    bounds = [(-4, 5.5), (0.1, 0.7), (-10, 4.3)]
    for lo, hi in bounds:
        # Build the test domain by rescaling the unit domain to (lo, hi).
        domain = Unit * np.float64(hi - lo) + np.float64(lo)
        trans = tr.interval(lo, hi)
        check_transform_identity(trans, domain)
        check_jacobian_det(trans, domain, elemwise=True)
        # All transformed samples must respect the open interval.
        vals = get_values(trans)
        close_to(vals > lo, True, tol)
        close_to(vals < hi, True, tol)
def __init__(self, distribution, lower, upper, transform, *args, **kwargs):
    """Wrap ``distribution`` with (lower, upper) bounds.

    Instantiates the underlying distribution, mirrors its attributes on
    this wrapper, and installs an interval transform when requested.
    """
    self.dist = distribution.dist(*args, **kwargs)
    # Copy the wrapped distribution's attributes onto this instance.
    self.__dict__.update(self.dist.__dict__)
    # NOTE(review): stores all locals (distribution, lower, upper, transform,
    # args, kwargs, self) as instance attributes; adding or renaming locals
    # above this line would change what gets stored — keep as-is.
    self.__dict__.update(locals())
    if hasattr(self.dist, 'mode'):
        self.mode = self.dist.mode
    # Only the 'interval' transform is handled here; other values leave
    # whatever transform the wrapped distribution already provided.
    if transform == 'interval':
        self.transform = transforms.interval(lower, upper)
def test_interval():
    """tr.interval with a params callable constrains values to (lo, hi)."""
    for lo, hi in [(-4, 5.5), (0.1, 0.7), (-10, 4.3)]:
        # Bind lo/hi as defaults so each iteration's callable captures
        # the current bounds (avoids late-binding of the loop variables).
        def bounds_fn(rv_var, lo=lo, hi=hi):
            return lo, hi

        domain = Unit * np.float64(hi - lo) + np.float64(lo)
        trans = tr.interval(bounds_fn)
        check_transform(trans, domain)
        check_jacobian_det(trans, domain, elemwise=True)
        # Transformed values must stay strictly within the open interval.
        vals = get_values(trans)
        close_to_logical(vals > lo, True, tol)
        close_to_logical(vals < hi, True, tol)
def test_upperbound():
    """tr.interval with (None, 0.0) acts as an upper-bound transform."""
    # No lower bound, upper bound fixed at 0.0.
    trans = tr.interval(lambda rv_var: (None, 0.0))
    check_transform(trans, Rminusbig)
    check_jacobian_det(trans, Rminusbig, elemwise=True)
    check_jacobian_det(
        trans, Vector(Rminusbig, 2), at.dvector, [-1, -1], elemwise=True
    )
    # All transformed values must fall below the upper bound.
    vals = get_values(trans)
    close_to_logical(vals < 0, True, tol)
def test_lowerbound():
    """tr.interval with (0.0, None) acts as a lower-bound transform."""
    # Lower bound fixed at 0.0, no upper bound.
    trans = tr.interval(lambda rv_var: (0.0, None))
    check_transform(trans, Rplusbig)
    check_jacobian_det(trans, Rplusbig, elemwise=True)
    check_jacobian_det(
        trans, Vector(Rplusbig, 2), at.dvector, [0, 0], elemwise=True
    )
    # All transformed values must sit above the lower bound.
    vals = get_values(trans)
    close_to_logical(vals > 0, True, tol)
def __init__(self, lower, upper):
    """Build and store an interval transform for the (lower, upper) bounds."""
    self.interval_transform = interval(lower, upper)