def __init__(self, p=None, logit_p=None, *args, **kwargs):
    super().__init__(*args, **kwargs)
    if sum(int(var is None) for var in [p, logit_p]) != 1:
        raise ValueError('Specify one of p and logit_p')
    if p is not None:
        self._is_logit = False
        self.p = p = tt.as_tensor_variable(p)
        self._logit_p = logit(p)
    else:
        self._is_logit = True
        self.p = tt.nnet.sigmoid(logit_p)
        self._logit_p = tt.as_tensor_variable(logit_p)

    self.mode = tt.cast(tround(self.p), 'int8')
def __init__(self, p=None, logit_p=None, *args, **kwargs):
    super().__init__(*args, **kwargs)
    if sum(int(var is None) for var in [p, logit_p]) != 1:
        raise ValueError('Specify one of p and logit_p')
    if p is not None:
        self._is_logit = False
        self.p = p = tt.as_tensor_variable(floatX(p))
        self._logit_p = logit(p)
    else:
        self._is_logit = True
        self.p = tt.nnet.sigmoid(floatX(logit_p))
        self._logit_p = tt.as_tensor_variable(logit_p)

    self.mode = tt.cast(tround(self.p), 'int8')
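A minimal usage sketch for the constructor above, assuming it is pymc3.Bernoulli on the Theano backend: exactly one of p and logit_p may be passed, and a logit_p argument is mapped back to a probability through the sigmoid.

# Usage sketch (assumption: this is pymc3.Bernoulli; the API may differ in other versions).
import pymc3 as pm

with pm.Model():
    x = pm.Bernoulli('x', p=0.3)            # probability parameterisation
    y = pm.Bernoulli('y', logit_p=-0.8473)  # logit parameterisation; sigmoid(-0.8473) ~ 0.3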
def __init__(self, n, p, *args, **kwargs):
    super(Multinomial, self).__init__(*args, **kwargs)

    p = p / tt.sum(p, axis=-1, keepdims=True)

    if len(self.shape) == 2:
        try:
            assert n.shape == (self.shape[0],)
        except AttributeError:
            # this occurs when n is a scalar Python int or float
            n *= tt.ones(self.shape[0])

        self.n = tt.shape_padright(n)
        self.p = p if p.ndim == 2 else tt.shape_padleft(p)
    else:
        self.n = tt.as_tensor_variable(n)
        self.p = tt.as_tensor_variable(p)

    self.mean = self.n * self.p
    self.mode = tt.cast(tround(self.mean), 'int32')
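A usage sketch for the multinomial constructor above, assuming it is pymc3.Multinomial: p is renormalised along its last axis, so unnormalised weights are accepted, and a 2-D shape pairs one trial count with each row of probabilities.

# Usage sketch (assumption: pymc3.Multinomial with a Theano backend).
import numpy as np
import pymc3 as pm

with pm.Model():
    # p is divided by its row sum, so these weights become [1/6, 2/6, 3/6].
    m = pm.Multinomial('m', n=10, p=np.array([1.0, 2.0, 3.0]), shape=3)
    # 2-D case: one count per row, one probability vector per row.
    m2 = pm.Multinomial('m2', n=np.array([10, 20]),
                        p=np.array([[0.2, 0.8], [0.5, 0.5]]), shape=(2, 2))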
def __init__(self, n, p, *args, **kwargs):
    super().__init__(*args, **kwargs)
    self.n = n = tt.as_tensor_variable(intX(n))
    self.p = p = tt.as_tensor_variable(floatX(p))
    self.mode = tt.cast(tround(n * p), self.dtype)
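A usage sketch for the binomial constructor above, assuming it is pymc3.Binomial: n is cast to an integer tensor, p to a float tensor, and round(n * p) is stored as the mode (used as a default starting value).

# Usage sketch (assumption: pymc3.Binomial).
import pymc3 as pm

with pm.Model():
    k = pm.Binomial('k', n=10, p=0.4)  # mode = round(10 * 0.4) = 4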
def __init__(self, alpha, beta, n, *args, **kwargs):
    super().__init__(*args, **kwargs)
    self.alpha = alpha = tt.as_tensor_variable(floatX(alpha))
    self.beta = beta = tt.as_tensor_variable(floatX(beta))
    self.n = n = tt.as_tensor_variable(intX(n))
    self.mode = tt.cast(tround(alpha / (alpha + beta)), 'int8')
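A usage sketch for the beta-binomial constructor above, assuming it is pymc3.BetaBinomial: the success probability is Beta(alpha, beta)-distributed over n trials, and the stored mode is the rounded mean alpha / (alpha + beta) of that Beta prior.

# Usage sketch (assumption: pymc3.BetaBinomial).
import pymc3 as pm

with pm.Model():
    k = pm.BetaBinomial('k', alpha=2.0, beta=5.0, n=20)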
def __init__(self, n, p, *args, **kwargs):
    super(Binomial, self).__init__(*args, **kwargs)
    self.n = n = tt.as_tensor_variable(n)
    self.p = p = tt.as_tensor_variable(p)
    self.mode = tt.cast(tround(n * p), self.dtype)
def __init__(self, p, *args, **kwargs):
    super(Bernoulli, self).__init__(*args, **kwargs)
    self.p = p = tt.as_tensor_variable(p)
    self.mode = tt.cast(tround(p), 'int8')
def __init__(self, alpha, beta, n, *args, **kwargs):
    super(BetaBinomial, self).__init__(*args, **kwargs)
    self.alpha = alpha = tt.as_tensor_variable(alpha)
    self.beta = beta = tt.as_tensor_variable(beta)
    self.n = n = tt.as_tensor_variable(n)
    self.mode = tt.cast(tround(alpha / (alpha + beta)), 'int8')
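These constructors are typically reached through observed likelihoods in a model; a minimal end-to-end sketch assuming the PyMC3 API, with made-up illustration data.

# End-to-end sketch (assumption: PyMC3 API; `data` is hypothetical).
import numpy as np
import pymc3 as pm

data = np.array([0, 1, 1, 0, 1, 1, 1, 0])
with pm.Model():
    theta = pm.Beta('theta', alpha=1.0, beta=1.0)       # prior on the success probability
    obs = pm.Bernoulli('obs', p=theta, observed=data)   # Bernoulli likelihood over the data
    trace = pm.sample(1000, tune=1000, cores=1)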