Example #1
    # Requires: import pymc3; from pymc3.math import dot, exp, log, sigmoid
    def fit(self, X, B, T):
        n, n_features = X.shape
        with pymc3.Model():
            beta_sd = pymc3.Exponential(
                'beta_sd', 1.0)  # Weak prior for the regression coefficients
            beta = pymc3.Normal('beta', mu=0, sd=beta_sd,
                                shape=(n_features,))  # Regression coefficients
            c = sigmoid(dot(X, beta))  # Conversion rates for each example
            k = pymc3.Lognormal('k', mu=0, sd=1.0)  # Weak prior around k=1
            lambd = pymc3.Exponential('lambd', 0.1)  # Weak prior

            # PDF of Weibull: k * lambda * (t * lambda)^(k-1) * exp(-(t * lambda)^k)
            LL_observed = log(c) + log(k) + log(
                lambd) + (k - 1) * (log(T) + log(lambd)) - (T * lambd)**k
            # Censored: either never converts (1 - c) or has not converted yet;
            # Weibull survival function: exp(-(t * lambda)^k)
            LL_censored = log((1 - c) + c * exp(-(T * lambd)**k))

            # We need to implement the likelihood using pymc3.Potential (custom likelihood)
            # https://github.com/pymc-devs/pymc3/issues/826
            logp = B * LL_observed + (1 - B) * LL_censored
            logpvar = pymc3.Potential('logpvar', logp.sum())

            self.trace = pymc3.sample(draws=500,
                                      tune=500,
                                      discard_tuned_samples=True,
                                      njobs=1)
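
A minimal usage sketch for the fit method above, assuming it sits on a wrapper class (the class name WeibullSurvivalModel and the synthetic data are illustrative, not from the source):

    import numpy as np

    n, n_features = 1000, 3
    X = np.random.randn(n, n_features)           # covariates
    T = np.random.exponential(1.0, size=n)       # observation times
    B = (np.random.rand(n) < 0.5).astype(float)  # 1 = converted, 0 = censored

    model = WeibullSurvivalModel()  # hypothetical class exposing fit() above
    model.fit(X, B, T)
    beta_samples = model.trace['beta']           # posterior draws of beta
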
Example #2
    # Requires: import theano.tensor as tt; from pymc3.math import sigmoid
    def __init__(self, eta, cutpoints, *args, **kwargs):
        self.eta = tt.as_tensor_variable(eta)
        self.cutpoints = tt.as_tensor_variable(cutpoints)

        pa = sigmoid(tt.shape_padleft(self.cutpoints) - tt.shape_padright(self.eta))
        p_cum = tt.concatenate([
            tt.zeros_like(tt.shape_padright(pa[:, 0])),
            pa,
            tt.ones_like(tt.shape_padright(pa[:, 0]))
        ], axis=1)
        p = p_cum[:, 1:] - p_cum[:, :-1]

        super().__init__(p=p, *args, **kwargs)
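
The concatenation pads the cumulative probabilities with 0 on the left and 1 on the right, so that adjacent differences yield exactly one probability per category. A small NumPy sketch of the same arithmetic (plain arrays instead of Theano tensors; the numbers are made up):

    import numpy as np
    from scipy.special import expit as sigmoid  # logistic function

    eta = np.array([0.3, -1.2])             # latent scores, 2 observations
    cutpoints = np.array([-1.0, 0.0, 1.0])  # K - 1 = 3 ordered thresholds

    pa = sigmoid(cutpoints[None, :] - eta[:, None])  # cumulative P(y <= j)
    p_cum = np.concatenate([np.zeros((2, 1)), pa, np.ones((2, 1))], axis=1)
    p = p_cum[:, 1:] - p_cum[:, :-1]        # K = 4 category probabilities
    assert np.allclose(p.sum(axis=1), 1.0)
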
Example #3
    # Requires: import theano.tensor as tt; from pymc3.math import sigmoid
    def __init__(self, eta, cutpoints, *args, **kwargs):
        self.eta = tt.as_tensor_variable(eta)
        self.cutpoints = tt.as_tensor_variable(cutpoints)

        pa = sigmoid(tt.shape_padleft(self.cutpoints) - tt.shape_padright(self.eta))
        p_cum = tt.concatenate([
            tt.zeros_like(tt.shape_padright(pa[:, 0])),
            pa,
            tt.ones_like(tt.shape_padright(pa[:, 0]))
        ], axis=1)
        p = p_cum[:, 1:] - p_cum[:, :-1]

        super(OrderedLogistic, self).__init__(p=p, *args, **kwargs)
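
This example is identical to Example #2 apart from the last line, which uses the explicit Python 2 compatible form super(OrderedLogistic, self).__init__ instead of the bare Python 3 super().
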
Example #4
    # Requires: import theano.tensor as tt; from pymc3.math import sigmoid;
    # from pymc3.theanof import floatX
    def __init__(self, eta, cutpoints, *args, **kwargs):
        self.eta = tt.as_tensor_variable(floatX(eta))
        self.cutpoints = tt.as_tensor_variable(cutpoints)

        pa = sigmoid(self.cutpoints - tt.shape_padright(self.eta))
        p_cum = tt.concatenate([
            tt.zeros_like(tt.shape_padright(pa[..., 0])),
            pa,
            tt.ones_like(tt.shape_padright(pa[..., 0]))
        ], axis=-1)
        p = p_cum[..., 1:] - p_cum[..., :-1]

        super().__init__(p=p, *args, **kwargs)
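
Examples #2 through #4 are variants of the __init__ of pymc3's OrderedLogistic distribution. A minimal usage sketch following the pattern in the pymc3 docs (the data and priors are illustrative):

    import numpy as np
    import pymc3 as pm

    y = np.array([0, 1, 2, 2, 1])  # ordinal responses in {0, ..., K-1}
    with pm.Model():
        cutpoints = pm.Normal('cutpoints', mu=0, sd=10, shape=2,
                              transform=pm.distributions.transforms.ordered,
                              testval=np.array([-1.0, 1.0]))
        eta = pm.Normal('eta', mu=0, sd=1)
        pm.OrderedLogistic('y', eta=eta, cutpoints=cutpoints, observed=y)
        trace = pm.sample(500, tune=500)
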
Example #5
    # Requires: import theano.tensor as tt; from pymc3.math import sigmoid
    def __init_probs(self):
        theta, alpha, kappa, gamma, sigma = self.param_list

        # Initialize probabilities
        # Compute the response cumulative probability functions
        resp_funcs = (sigma - gamma) * sigmoid(alpha * theta + kappa) + gamma

        # Pad the cumulative probabilities with 1 and 0 and reverse the
        # category order, so that index i of cprobst is P(X >= i)
        cprobst = tt.concatenate([
            tt.ones_like(tt.shape_padright(resp_funcs[..., 0])),
            resp_funcs[..., ::-1],
            tt.zeros_like(tt.shape_padright(resp_funcs[..., 0]))
        ], axis=-1)

        # Discrete difference across response categories to get marginal
        # probabilities; the negative of tt.extra_ops.diff, since cprobst
        # is decreasing
        probst = (cprobst[..., :-1] - cprobst[..., 1:])

        return cprobst, probst
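
A small NumPy check of the cumulative-to-marginal differencing this method performs (plain arrays standing in for the Theano tensors; the probabilities are made up):

    import numpy as np

    # Threshold curves P(X >= j) for j = 3, 2, 1: highest category first,
    # matching the order of resp_funcs before the reversal
    resp_funcs = np.array([0.2, 0.5, 0.8])

    cprobst = np.concatenate([[1.0], resp_funcs[::-1], [0.0]])  # P(X >= i), i = 0..4
    probst = cprobst[:-1] - cprobst[1:]                         # P(X = i)
    assert np.isclose(probst.sum(), 1.0)                        # [0.2, 0.3, 0.3, 0.2]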