Example #1
0
    def _get_weighted_statistics(self,data,weights):
        """Return (n, tot, log marginal likelihoods), one likelihood per candidate r."""
        n, tot = super(NegativeBinomialIntegerR,self)._get_weighted_statistics(data,weights)
        if n <= 0:
            return n, tot, np.zeros_like(self.r_support)

        # posterior Beta hyperparameters for p, one entry per candidate r
        alpha_n = self.alpha_0 + tot
        betas_n = self.beta_0 + self.r_support*n
        data, weights = flattendata(data), flattendata(weights)
        # Beta-function ratio plus the weighted sum of per-datum log terms
        prior_term = special.betaln(alpha_n, betas_n) \
                - special.betaln(self.alpha_0, self.beta_0)
        per_datum = special.gammaln(data[:,na]+self.r_support) \
                - special.gammaln(data[:,na]+1) \
                - special.gammaln(self.r_support)
        log_marg_likelihoods = prior_term + per_datum.dot(weights)
        return n, tot, log_marg_likelihoods
Example #2
0
    def _get_weighted_statistics(self,data,weights):
        """Collect weighted sufficient statistics plus per-r log marginal likelihoods."""
        n, tot = super(NegativeBinomialIntegerR,self)._get_weighted_statistics(data,weights)
        if n > 0:
            # posterior Beta hyperparameters for p given each candidate r
            alpha_n = self.alpha_0 + tot
            betas_n = self.beta_0 + self.r_support*n
            flat_data = flattendata(data)
            flat_weights = flattendata(weights)
            per_datum = (special.gammaln(flat_data[:,na]+self.r_support)
                    - special.gammaln(flat_data[:,na]+1)
                    - special.gammaln(self.r_support))
            log_marg_likelihoods = special.betaln(alpha_n,betas_n) \
                    - special.betaln(self.alpha_0,self.beta_0) \
                    + per_datum.dot(flat_weights)
        else:
            log_marg_likelihoods = np.zeros_like(self.r_support)

        return n, tot, log_marg_likelihoods
Example #3
0
 def _get_statistics(self,data):
     """Return (n, sum, per-r log normalizers, feasibility mask), or Nones when empty."""
     n = getdatasize(data)
     if n == 0:
         return n, None, None, None
     flat = flattendata(data)
     # only r values no larger than the smallest observation are feasible
     feasible = self.r_support <= flat.min()
     assert np.any(feasible)
     r_support = self.r_support[feasible]
     # log combinatorial normalizer, summed over all data points
     normalizers = (special.gammaln(flat[:,na]) - special.gammaln(flat[:,na]-r_support+1)
             - special.gammaln(r_support)).sum(0)
     return n, flat.sum(), normalizers, feasible
Example #4
0
 def _get_statistics(self,data):
     """Compute count, total, log normalizers over the feasible r values, and the mask."""
     n = getdatasize(data)
     if n > 0:
         data = flattendata(data)
         # r can be at most the minimum observed value
         feasible = self.r_support <= data.min()
         assert np.any(feasible)
         rs = self.r_support[feasible]
         log_norms = special.gammaln(data[:,na]) \
                 - special.gammaln(data[:,na]-rs+1) \
                 - special.gammaln(rs)
         return n, data.sum(), log_norms.sum(0), feasible
     else:
         return n, None, None, None
Example #5
0
 def _get_statistics(self,data=[]):
     """Return the log marginal likelihood of the data for each candidate r."""
     n, tot = self._fixedr_distns[0]._get_statistics(data)
     if n == 0:
         return np.zeros_like(self.r_support)
     data = flattendata(data)
     # posterior Beta hyperparameters for p, one entry per candidate r
     alphas_n = self.alphas_0 + tot
     betas_n = self.betas_0 + self.r_support*n
     datum_terms = (special.gammaln(data[:,na]+self.r_support)
             - special.gammaln(data[:,na]+1)
             - special.gammaln(self.r_support)).sum(0)
     return special.betaln(alphas_n, betas_n) \
             - special.betaln(self.alphas_0, self.betas_0) \
             + datum_terms
Example #6
0
 def resample(self,data=[],niter=20):
     """Gibbs-resample (p, r); with no data, draw both from their priors."""
     if getdatasize(data) == 0:
         # no observations: sample from the prior
         self.p = np.random.beta(self.alpha_0,self.beta_0)
         self.r = np.random.gamma(self.k_0,self.theta_0)
         return self
     data = atleast_2d(flattendata(data))
     N = len(data)
     for itr in range(niter):
         # resample r via latent table-count augmentation
         msum = sample_crp_tablecounts(self.r,data).sum()
         self.r = np.random.gamma(self.k_0 + msum, 1/(1/self.theta_0 - N*np.log(1-self.p)))
         # resample p conditioned on r
         self.p = np.random.beta(self.alpha_0 + data.sum(), self.beta_0 + N*self.r)
     return self
Example #7
0
 def _get_statistics(self,data=[]):
     """Compute per-r log marginal likelihoods by integrating out p."""
     n, tot = self._fixedr_distns[0]._get_statistics(data)
     if n > 0:
         data = flattendata(data)
         # posterior Beta hyperparameters for p given each candidate r
         alphas_n = self.alphas_0 + tot
         betas_n = self.betas_0 + self.r_support*n
         prior_term = special.betaln(alphas_n, betas_n) \
                 - special.betaln(self.alphas_0, self.betas_0)
         data_term = (special.gammaln(data[:,na]+self.r_support)
                 - special.gammaln(data[:,na]+1)
                 - special.gammaln(self.r_support)).sum(0)
         log_marg_likelihoods = prior_term + data_term
     else:
         log_marg_likelihoods = np.zeros_like(self.r_support)
     return log_marg_likelihoods
Example #8
0
 def resample(self,data=[],niter=20):
     """Gibbs-resample (p, r); with no data, draw both from their priors.

     Bug fix: `N` was referenced in the r- and p-updates but never defined,
     raising NameError on any non-empty data; it is now bound to the number
     of observations (`data.shape[1]`, consistent with how `ones` is sized).
     """
     if getdatasize(data) == 0:
         # no observations: sample from the prior
         self.p = np.random.beta(self.alpha_0,self.beta_0)
         self.r = np.random.gamma(self.k_0,self.theta_0)
     else:
         data = np.atleast_2d(flattendata(data))
         # number of observations; assumes flattendata returns a 1-D array so
         # atleast_2d yields shape (1, N) -- TODO confirm against flattendata
         N = data.shape[1]
         ones = np.ones(data.shape[1],dtype=float)
         for itr in range(niter):
             ### resample r
             msum = sample_crp_tablecounts(float(self.r),data,ones).sum()
             self.r = np.random.gamma(self.k_0 + msum, 1/(1/self.theta_0 - N*np.log(1-self.p)))
             ### resample p
             self.p = np.random.beta(self.alpha_0 + data.sum(), self.beta_0 + N*self.r)
     return self
Example #9
0
 def resample_python(self,data=[],niter=20):
     """Pure-Python Gibbs sampler for (p, r); prior draws when there is no data."""
     if getdatasize(data) == 0:
         # no observations: sample parameters from the prior
         self.p = np.random.beta(self.alpha_0,self.beta_0)
         self.r = np.random.gamma(self.k_0,self.theta_0)
         return self
     data = flattendata(data)
     N = len(data)
     for itr in range(niter):
         # resample r: accumulate latent counts, one Bernoulli per unit of each datum
         msum = 0.
         for count in data:
             msum += (np.random.rand(count) < self.r/(np.arange(count)+self.r)).sum()
         self.r = np.random.gamma(self.k_0 + msum, 1/(1/self.theta_0 - N*np.log(1-self.p)))
         # resample p conditioned on r
         self.p = np.random.beta(self.alpha_0 + data.sum(), self.beta_0 + N*self.r)
     return self
Example #10
0
 def resample_python(self,data=[],niter=20):
     """Resample (p, r) by Gibbs sampling in pure Python."""
     if getdatasize(data) == 0:
         self.p = np.random.beta(self.alpha_0,self.beta_0)
         self.r = np.random.gamma(self.k_0,self.theta_0)
     else:
         data = flattendata(data)
         N = len(data)
         for itr in range(niter):
             ### resample r via per-datum latent count sums
             msum = sum(
                 (np.random.rand(x) < self.r/(np.arange(x)+self.r)).sum()
                 for x in data)
             self.r = np.random.gamma(self.k_0 + msum, 1/(1/self.theta_0 - N*np.log(1-self.p)))
             ### resample p conditioned on r
             self.p = np.random.beta(self.alpha_0 + data.sum(), self.beta_0 + N*self.r)
     return self
Example #11
0
 def resample_logseriesaug(self,data=[],niter=20):
     """Gibbs-resample (p, r) using a log-series augmentation.

     Alternates sampling latent counts L_i for each positive datum, then
     r (Gamma) and p (Beta) conditioned on them; with no data, draws both
     parameters from their priors.
     """
     # an alternative algorithm, kind of opaque and no advantages...
     if getdatasize(data) == 0:
         # no observations: sample from the prior
         self.p = np.random.beta(self.alpha_0,self.beta_0)
         self.r = np.random.gamma(self.k_0,self.theta_0)
     else:
         data = flattendata(data)
         N = data.shape[0]
         logF = self.logF  # precomputed table of log coefficients -- presumably indexed by (datum-1, L-1); confirm
         L_i = np.zeros(N)  # latent counts; stays 0 where data == 0
         data_nz = data[data > 0]
         for itr in range(niter):
             # unnormalized log probabilities over latent-count values for each datum
             logR = np.arange(1,logF.shape[1]+1)*np.log(self.r) + logF
             L_i[data > 0] = sample_discrete_from_log(logR[data_nz-1,:data_nz.max()],axis=1)+1
             # conditional Gamma update for r, then Beta update for p
             self.r = np.random.gamma(self.k_0 + L_i.sum(), 1/(1/self.theta_0 - np.log(1-self.p)*N))
             self.p = np.random.beta(self.alpha_0 + data.sum(), self.beta_0 + N*self.r)
     return self
Example #12
0
 def resample_logseriesaug(self,data=[],niter=20):
     """Gibbs-resample (p, r) via a log-series latent-count augmentation.

     Samples a latent count L_i for every positive datum, then draws r from
     its conditional Gamma and p from its conditional Beta; falls back to
     prior draws when there is no data.
     """
     # an alternative algorithm, kind of opaque and no advantages...
     if getdatasize(data) == 0:
         # no observations: sample from the prior
         self.p = np.random.beta(self.alpha_0,self.beta_0)
         self.r = np.random.gamma(self.k_0,self.theta_0)
     else:
         data = flattendata(data)
         N = data.shape[0]
         logF = self.logF  # precomputed table of log coefficients -- presumably indexed by (datum-1, L-1); confirm
         L_i = np.zeros(N)  # latent counts; stays 0 where data == 0
         data_nz = data[data > 0]
         for itr in range(niter):
             # unnormalized log probabilities over latent-count values for each datum
             logR = np.arange(1,logF.shape[1]+1)*np.log(self.r) + logF
             L_i[data > 0] = sample_discrete_from_log(logR[data_nz-1,:data_nz.max()],axis=1)+1
             # conditional Gamma update for r, then Beta update for p
             self.r = np.random.gamma(self.k_0 + L_i.sum(), 1/(1/self.theta_0 - np.log(1-self.p)*N))
             self.p = np.random.beta(self.alpha_0 + data.sum(), self.beta_0 + N*self.r)
     return self
Example #13
0
    def _get_statistics(self,data):
        """Return (n, tot, per-r log marginal likelihoods).

        NOTE: this distribution isn't really exponential family, so the
        method must look at hyperparameters: it forms the posterior
        hyperparameters for p here in order to integrate p out and obtain
        statistics over r.
        """
        n, tot = super(NegativeBinomialIntegerR,self)._get_statistics(data)
        if n <= 0:
            return n, tot, np.zeros_like(self.r_support)

        # posterior Beta hyperparameters for p, one entry per candidate r
        alpha_n = self.alpha_0 + tot
        betas_n = self.beta_0 + self.r_support*n
        data = flattendata(data)
        per_datum = (special.gammaln(data[:,na]+self.r_support)
                - special.gammaln(data[:,na]+1)
                - special.gammaln(self.r_support))
        log_marg_likelihoods = special.betaln(alpha_n, betas_n) \
                - special.betaln(self.alpha_0, self.beta_0) \
                + per_datum.sum(0)
        return n, tot, log_marg_likelihoods
Example #14
0
    def _get_statistics(self,data):
        """Collect (n, tot) plus log marginal likelihoods over the r support.

        NOTE: not truly exponential family, so posterior hyperparameters
        for the p parameters are formed here to integrate p out and get
        the r statistics.
        """
        n, tot = super(NegativeBinomialIntegerR,self)._get_statistics(data)
        if n > 0:
            # posterior Beta hyperparameters for p given each candidate r
            alpha_n = self.alpha_0 + tot
            betas_n = self.beta_0 + self.r_support*n
            flat = flattendata(data)
            prior_term = special.betaln(alpha_n, betas_n) \
                    - special.betaln(self.alpha_0, self.beta_0)
            data_term = (special.gammaln(flat[:,na]+self.r_support)
                    - special.gammaln(flat[:,na]+1)
                    - special.gammaln(self.r_support)).sum(0)
            log_marg_likelihoods = prior_term + data_term
        else:
            log_marg_likelihoods = np.zeros_like(self.r_support)

        return n, tot, log_marg_likelihoods