def test_1d_integrate_withnoise():
    """ Test integration of synthetic 1D data with two peaks and noise"""

    # seed random number
    np.random.seed(0)

    # generate test scale
    ppm_scale = _build_1d_ppm_scale()
    uc = ng.fileio.fileiobase.uc_from_freqscale(ppm_scale, 100)

    # generate test data
    data = (multivariate_normal.pdf(ppm_scale, mean=5, cov=0.01) +
            multivariate_normal.pdf(ppm_scale, mean=8, cov=0.01) * 2)
    noise = np.random.randn(*data.shape) * 0.01
    data = data + noise

    # import matplotlib.pyplot as plt
    # plt.figure()
    # plt.plot(ppm_scale, data)
    # plt.show()

    # estimate of how much the noise can throw off the integral
    limits = (4.0, 6.0)
    n = abs(uc(4.5, 'ppm')-uc(5.5, 'ppm'))
    max_error = np.std(noise) * np.sqrt(n)

    # Test with a single integral region as a sanity check.
    assert abs(integrate(data, uc, limits) - 1) <= max_error

    # Test renormalization of norms
    results = integrate(data, uc, ((4, 6), (7, 9)), noise_limits=(1, 2),
                        norm_to_range=1)

    # Test renormalization of values.
    assert abs(results[0, 0] - 0.5) <= max_error
Example 2
def get_prob_point(point,Sigma,mu_L,Sigma_L,rsTs,M):
    '''
    given a 6D point, covariance matrices, mean, input points
    and camera matrix, find out how probable the two 3D points of 
    6D point are given the input data
    '''
    # split up 6D point into two 3D points, initial and final
    p_i = point[:3]
    p_f = point[3:]
    # convert each point to 2D
    q_i = convert_3D_to_2D(p_i,M)
    q_f = convert_3D_to_2D(p_f,M)
    # get prior prob for each point, then multiply them for joint prob
    prior_i = (multivariate_normal.pdf(p_i,mean=mu_L,cov=Sigma_L))
    prior_f = (multivariate_normal.pdf(p_f,mean=mu_L,cov=Sigma_L))
    joint_prior = prior_i*prior_f
    # evaluate p_i and p_f on all given, 2D rendered points
    probs=[]
    for dataPoint in rsTs:
        r = dataPoint[:2]
        t = dataPoint[2]
        q_s = q_i + (q_f-q_i)*t
        q_s = q_s.flatten()
        prob_r = (multivariate_normal.pdf(r,mean=q_s,cov=Sigma))
        probs.append(prob_r*joint_prior)
    sumProbs = np.sum(probs)
    return sumProbs
def test_1d_integrate():
    """ Test integration of synthetic 1D data with two peaks."""
    # generate test scale
    ppm_scale = _build_1d_ppm_scale()
    uc = ng.fileio.fileiobase.uc_from_freqscale(ppm_scale, 100)

    # generate test data
    data = (multivariate_normal.pdf(ppm_scale, mean=5, cov=0.01) +
            multivariate_normal.pdf(ppm_scale, mean=8, cov=0.01) * 2.)

    # import matplotlib.pyplot as plt
    # plt.figure()
    # plt.plot(ppm_scale, data)
    # plt.show()

    # Test with a single integral region
    assert integrate(data, uc, (4, 6)) - 1.0 <= np.finfo(float).eps
    assert integrate(data, uc, (7, 9)) - 2.0 <= np.finfo(float).eps

    # Test with multiple integral regions:
    limits = [(4, 6), (7, 9)]
    values_1 = integrate(data, uc, limits)
    assert_array_almost_equal(values_1,  [1., 2.])

    # Test with multiple integral regions and re-normalization of the values
    # to the value of the range between 7 ppm and 9 ppm
    values_2 = integrate(data, uc, limits, norm_to_range=1)
    assert_array_almost_equal(values_2,  [0.5, 1.])

    # Test with multiple integral regions and re-normalization of the values
    # to the value of the range between 7 ppm and 9 ppm and re-calibration
    # of the value to 3.
    values_2 = integrate(data, uc, limits, norm_to_range=1, calibrate=3)
    assert_array_almost_equal(values_2,  [0.5 * 3, 3.])
Example 4
def main():

  #loads data from .mat file
  data = scipy.io.loadmat('anomaly_data.mat')
  X = data['X']
  #validation data (ground truth) 
  #(yval=1 if anomalous and 0 otherwise) 
  Xval = data['Xval']
  yval = data['yval']
  yval = yval.ravel()
  
  #finds the mean and variance of each feature of data
  mu = np.mean(X, 0)
  sigma2 = np.var(X, 0)
  
  #fits a multivariate Gaussian distribution to the data and
  #finds the probability density function
  p = multivariate_normal.pdf(X, mu, sigma2)
  #finds predicted pdf for validation data
  pval = multivariate_normal.pdf(Xval, mu, sigma2)
  
  threshold, F1_score = find_threshold(yval, pval)
  
  #total number of outliers
  number_outlier = sum(p < threshold)
  #the indices of outliers
  outlier_index = np.where(p < threshold)[0].tolist()

  print('\nThere are %d outliers in total.\n' % number_outlier)

  #writes the indices of the outliers to a file
  f = open('outlier_indices.txt', 'w')
  f.writelines('%d\n'%index for index in outlier_index)
  f.close()
Example 5
def compute_model1_loglikelihood(x, pi, mean0, mean1, cov0, cov1):
  n, m = len(x), len(x[0])
  loglikelihood = 0.0
  for i in range(n):
    for j in range(m):
      p_x_ij = pi * multivariate_normal.pdf(x[i][j], mean=mean1, cov=cov1) + \
             (1 - pi) * multivariate_normal.pdf(x[i][j], mean=mean0, cov=cov0)
      loglikelihood += math.log(p_x_ij)
  return loglikelihood
Example 6
def estimate_posterior_z_by_x(x_row, phi, lambd, mean0, mean1, cov0, cov1):
  m = len(x_row)
  posterior_y = estimate_posterior_y_by_x(x_row, phi, lambd, mean0, mean1, cov0, cov1)
  posterior_z = [None] * m
  for j in range(m):
    p_z_0 = (posterior_y * (1 - lambd) + (1 - posterior_y) * lambd) * multivariate_normal.pdf(x_row[j], mean=mean0, cov=cov0)
    p_z_1 = (posterior_y * lambd + (1 - posterior_y) * (1 - lambd)) * multivariate_normal.pdf(x_row[j], mean=mean1, cov=cov1)
    posterior_z[j] = p_z_1 / (p_z_0 + p_z_1)
  return posterior_z
Example 7
def estimate_posterior_y_by_x(x_row, phi, lambd, mean0, mean1, cov0, cov1):
  m = len(x_row)
  p_y_1_running = math.log(phi)
  p_y_0_running = math.log(1.0 - phi)
  for j in range(m):
    # sending messages from Z_j to Y and marginalizing all-in-one
    p_y_1_running += math.log(lambd * multivariate_normal.pdf(x_row[j], mean=mean1, cov=cov1) + (1 - lambd) * multivariate_normal.pdf(x_row[j], mean=mean0, cov=cov0))
    p_y_0_running += math.log((1 - lambd) * multivariate_normal.pdf(x_row[j], mean=mean1, cov=cov1) + lambd * multivariate_normal.pdf(x_row[j], mean=mean0, cov=cov0))
  return expit(p_y_1_running - p_y_0_running)
Example 8
 def density(self, x):
     for i in range(5):
         try:
             return multivariate_normal.pdf(x, self.mu, self.cov)
         except (ValueError, np.linalg.LinAlgError) as err:
             msg = str(err)
             if ('singular matrix' in msg
                     or 'the input matrix must be positive semidefinite' in msg):
                 self.cov = positive_def(self.cov, i)
                 print("det", np.linalg.det(self.cov))
     return multivariate_normal.pdf(x, self.mu, self.cov)
 def __gaussianProb(self, x, means, covariances, nClasses):
     class_probs = np.zeros(nClasses)
     for c in range(nClasses):
         if self.isSharedCovariance:
             class_probs[c] = multivariate_normal.pdf(x, mean=means[c],
                     cov=covariances)
         else:
             class_probs[c] = multivariate_normal.pdf(x, mean=means[c],
                     cov=covariances[c])
     return np.log(class_probs)
 def posterior(self, x):
     posteriors = []
     for ks in set(self.Y):
         if self.isSharedCovariance:
             likelihood = multivariate_normal.pdf(x, self.mu[ks], self.shared)
         else:
             likelihood = multivariate_normal.pdf(x, self.mu[ks], self.s_k[ks])
         prob = likelihood * self.prior[ks]
         posteriors.append(prob)
     return posteriors
def do( img_path ):
    im_data = sio.imread( img_path )
    nr, nc, _ = im_data.shape
    print(nr, nc)
    print(im_data[:, :, 0])
    new_data = im_data.reshape(nr * nc, 3)
    print(new_data.shape)
    print(new_data)

    #NOTE, there is no parameter at all.
    # first try the T.png

    # fit using one gaussian.
    print(np.mean(new_data, axis=0))
    print(np.cov(new_data, rowvar=0))

    # fit with two gaussians using the EM algorithm,
    # but with the limitation that the two separate clusters
    # TODO: should also be connected in space

    # for all the pixels, randomly assign the label as 0 or 1 
    max_iter = 100
    label = np.random.randint(2, size=nr*nc) 
    while True:
        c1_data = new_data[label==0, :]
        mean1 = np.mean( c1_data, axis=0)
        std1 = np.cov( c1_data, rowvar=0)
        #print 'new iteration'
        #print mean1
        #print std1
        std1 = std1 + 0.01*np.max(std1)*np.identity(3)
        
        c2_data = new_data[label==1, :]
        mean2 = np.mean( c2_data, axis=0)
        std2 = np.cov( c2_data, rowvar=0)
        #print mean2
        #print std2
        std2 = std2 + 0.01*np.max(std2)*np.identity(3)

        # NOTE
        p1 = multivariate_normal.pdf( new_data, mean=mean1, cov=std1 )
        p2 = multivariate_normal.pdf( new_data, mean=mean2, cov=std2 )
        new_label = np.zeros( nr*nc )
        new_label[p1<p2] = 1
        #print np.sum( new_label != label)
        if np.sum( new_label != label) < 2:
            label = new_label
            break
        label = new_label
        max_iter -= 1
        if max_iter < 0:
            break

    print(max_iter)
def optimal_bayes(X, y, means):
    '''
    First 10 means m_k are generated from bivariate Gaussian 
    N([0,1],I) and labeled as RED, another 10 means are generated
    from N([1,0],I) and labeled as BLUE. Then 100 RED observations 
    are generated by first pick an m_k at random with p=0.1, then
    generate observation by N(m_k,I/5). Another 100 BLUE 
    observations are generated by the same procedure.
    Optimal Bayes decision attribute G(x) = k-th class where
    P(Y in k-th class | X = x) is the maximum.
    Estimated runtime = 25s
    '''
    from scipy.stats import multivariate_normal
    delta = .1
    grid_x = np.arange(min(X[:,0])-.5, max(X[:,0])+.5, delta)
    grid_y = np.arange(min(X[:,1])-.5, max(X[:,1])+.5, delta)
    grid_X, grid_Y = np.meshgrid(grid_x, grid_y)
    combine_XY = np.dstack((grid_X,grid_Y)).reshape(grid_X.size,2)
    Z = []
    for p in combine_XY:
        dist_B = .0
        dist_R = .0
        covar  = [[0.2,0],[0,0.2]]
        for m in means[:10,:]:
            dist_B += multivariate_normal.pdf(p, mean=m, cov=covar)
        for m in means[10:,:]:
            dist_R += multivariate_normal.pdf(p, mean=m, cov=covar)
        Z.append(np.exp(np.log(dist_B) - np.log(dist_R)) - 1.)
    Z = np.array(Z)
    grid_Z = Z.reshape(grid_X.shape)
    plt.scatter(X[:,0], X[:,1], c=y, alpha=.6)
    plt.scatter(means[:10,0], means[:10,1], s=80, color='blue')
    plt.scatter(means[10:,0], means[10:,1], s=80, color='red')
    plt.contour(grid_X, grid_Y, grid_Z, 1, alpha=.8,
                colors='b', linewidths=3)
    plt.show()
    n = len(y)
    predict = []
    for i in range(n):
        dist_B = .0
        dist_R = .0
        covar  = [[0.2,0],[0,0.2]]
        for m in means[:10,:]:
            dist_B += multivariate_normal.pdf(X[i,:], mean=m, 
                                              cov=covar)
        for m in means[10:,:]:
            dist_R += multivariate_normal.pdf(X[i,:], mean=m, 
                                              cov=covar)
        if (dist_B > dist_R):
            predict.append(0)
        else:
            predict.append(1)
    print('Precision:', 100. * sum(predict == y) / len(y))
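# The docstring above describes how X, y and the 20 component means are
# simulated. A minimal sketch of that generation procedure (a hypothetical
# helper with assumed names, not part of the original snippet):
import numpy as np

def make_mixture_data(n_per_class=100, seed=0):
    rng = np.random.RandomState(seed)
    # first 10 means drawn from N([0, 1], I), next 10 drawn from N([1, 0], I)
    means = np.vstack([rng.multivariate_normal([0, 1], np.eye(2), 10),
                       rng.multivariate_normal([1, 0], np.eye(2), 10)])
    X, y = [], []
    for label, cls_means in ((0, means[:10]), (1, means[10:])):
        for _ in range(n_per_class):
            m_k = cls_means[rng.randint(10)]        # pick one of the 10 means (p = 0.1 each)
            X.append(rng.multivariate_normal(m_k, np.eye(2) / 5.))  # observation ~ N(m_k, I/5)
            y.append(label)
    return np.array(X), np.array(y), means

# X, y, means = make_mixture_data()
# optimal_bayes(X, y, means)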
Example 13
    def compute_likelihood(t0, t1):
        """
            Compute the likelihood associated with parameters t0 and t1.
                > Store in max_likelihood and max_indices if relevant.

        :param t0: End of first rest period
        :param t1: End of movement and/or beginning of second rest period.
        """

        nonlocal max_likelikehood, max_indices, min_interval, emg_data, T
        if t1 - t0 < min_interval:
            return

        #
        # Compute MLE estimate of rest distribution parameters
        #
        rest_one        = emg_data[0:t0]
        rest_two        = emg_data[t1:T]
        rest_samples    = np.concatenate((rest_one, rest_two), axis=0)
        rest_mean       = np.mean(rest_samples, axis=0)
        rest_cov        = np.cov(rest_samples, rowvar=False)
        rest_var        = np.diag(rest_cov)

        #
        # Compute MLE estimate of movement signal distribution parameters
        #
        sig_samples = emg_data[t0:t1]
        sig_mean    = np.mean(sig_samples, axis=0)
        sig_cov     = np.cov(sig_samples, rowvar=False)
        sig_var     = np.diag(sig_cov)

        # Can't have rest have more power than signal itself
        found_error = False
        for i in range(num_emg_ch):
            if rest_var[i] > sig_var[i]:
                found_error = True
                break
        if found_error:
            return

        try:
            rest_sum        = np.sum(np.log(multivariate_normal.pdf(x=rest_samples, mean=rest_mean, cov=rest_cov)))
            sig_sum         = np.sum(np.log(multivariate_normal.pdf(x=sig_samples, mean=sig_mean, cov=sig_cov)))
            log_likelihood  = rest_sum + sig_sum

            if log_likelihood > max_likelikehood:
                max_likelikehood    = log_likelihood
                max_indices         = (t0, t1)

        except np.linalg.LinAlgError as e:
            if 'singular matrix' in str(e):
                return
            else:
                raise (e)
Example 14
 def test_logpdf_default_values(self):
     # Check that the log of the pdf is in fact the logpdf
     # with default parameters Mean=None and cov = 1
     np.random.seed(1234)
     x = np.random.randn(5)
     d1 = multivariate_normal.logpdf(x)
     d2 = multivariate_normal.pdf(x)
     # check whether default values are being used
     d3 = multivariate_normal.logpdf(x, None, 1)
     d4 = multivariate_normal.pdf(x, None, 1)
     assert_allclose(d1, np.log(d2))
     assert_allclose(d3, np.log(d4))
def Post_predictive(x, nu_0,mu_0,X_cl, N_cl, dim):
    m0 = np.zeros(dim)
    x_bar = mu_0

    if len(X_cl) != N_cl:
        print "something wrong"
        return None

    if N_cl < 1:
        if np.random.rand() < 0.1:
            return 0.
        else:
            return np.random.rand()

    if N_cl == 1:
        k0 = 0.01
        mN = (k0 / (k0 + N_cl))*m0 + (N_cl/(k0+N_cl)) * x_bar
        SN = np.dot(np.reshape(x,(dim,1)),np.reshape(x,(dim,1)).T)
        # _,SN,_ = np.linalg.svd(SN)
        # SN = np.diag(SN)
        try:
            return multivariate_normal.pdf(x,mean=mN, cov=SN)
        except (ValueError, np.linalg.LinAlgError):
            # print("regularized...")
            return multivariate_normal.pdf(x,mean=mN, cov=SN+1e-6*np.eye(dim))

    else:
        Cov_est = np.zeros((dim,dim))
        for idx in range(len(X_cl)):
            diff = np.reshape(X_cl[idx]-mu_0,(dim,1))
            Cov_est += np.dot(diff,diff.T)

        S0  = Cov_est / float(len(X_cl))
        k0 = 0.01

        ''' NIW posterior param '''
        mN = (k0 / (k0 + N_cl))*m0 + (N_cl/(k0+N_cl)) * x_bar
        kN = k0 + N_cl
        nu_N = nu_0 + N_cl


        SN = S0 + Cov_est
        x_bar_ = np.reshape(x_bar,(dim,1))
        SN += (k0*N_cl)/(k0+N_cl)*np.dot(x_bar_,x_bar_.T)
        SN *= (kN+1)/(kN*(nu_N - dim + 1))
        _,SN,_ = np.linalg.svd(SN)
        SN = np.diag(SN)
        try:
            return multivariate_normal.pdf(x, mean=mN,cov=SN)
        except (ValueError, np.linalg.LinAlgError):
            # print("regularized...")
            return multivariate_normal.pdf(x,mean=mN, cov=SN+1e-6*np.eye(dim))
Example 16
def compute_model2_loglikelihood(x, phi, lambd, mean0, mean1, cov0, cov1):
  n, m = len(x), len(x[0])
  loglikelihood = 0.0
  for i in range(n):
    for j in range(m):
      p_x_ij = (phi * lambd + (1 - phi) * (1 - lambd)) * multivariate_normal.pdf(x[i][j], mean=mean1, cov=cov1) + \
               ((1 - phi) * lambd + phi * (1 - lambd)) * multivariate_normal.pdf(x[i][j], mean=mean0, cov=cov0)
      try:
        loglikelihood += math.log(p_x_ij)
      except Exception:
        print('ERROR!')
        print(p_x_ij)
  return loglikelihood
def test_1d_ndintegrate():
    """ Test integration of synthetic with ndintegrate.
    """
    # generate test scale
    ppm_scale = _build_1d_ppm_scale()
    uc = ng.fileio.fileiobase.uc_from_freqscale(ppm_scale, 100)

    # generate test data
    data = (multivariate_normal.pdf(ppm_scale, mean=5, cov=0.01) +
            multivariate_normal.pdf(ppm_scale, mean=8, cov=0.01) * 2.)

    # Test with a single integral region
    assert abs(ndintegrate(data, uc, (4, 6)) - 1.0) <= 1e-10
    assert abs(ndintegrate(data, uc, [(7, 9), ]) - 2.0) <= 1e-10
Example 18
def test_scalar_values():
    np.random.seed(1234)

    # When evaluated on scalar data, the pdf should return a scalar
    x, mean, cov = 1.5, 1.7, 2.5
    pdf = multivariate_normal.pdf(x, mean, cov)
    assert_equal(pdf.ndim, 0)

    # When evaluated on a single vector, the pdf should return a scalar
    x = np.random.randn(5)
    mean = np.random.randn(5)
    cov = np.abs(np.random.randn(5))  # Diagonal values for cov. matrix
    pdf = multivariate_normal.pdf(x, mean, cov)
    assert_equal(pdf.ndim, 0)
Example 19
		def alpha(state, time, observations, A):
			if time == 0:
				# print self.pi[state] * multivariate_normal.pdf(observations[0], mean=self.obsProbs[state][0], cov=self.obsProbs[state][1])
				return self.pi[state] * multivariate_normal.pdf(observations[0], mean=self.obsProbs[state][0], cov=self.obsProbs[state][1])
			if A[state].get(time) != None:
				return A[state][time]
			else:
				Sum = 0
				prevStates = []
				nPrevs = min(3,state+1)
				for i in range(nPrevs):
					s = state - i
					Sum += alpha(s,time - 1,observations,A) * self.transProbs[s][state] * multivariate_normal.pdf(observations[time], mean=self.obsProbs[state][0], cov=self.obsProbs[state][1])
				A[state][time] = Sum
				return Sum
Example 20
def proposal(sample):
    '''
    Define two multivariate normal distributions and return a weighted
    combination of their densities evaluated at the given sample(s).
    Args:
    sample: point, or array of points, at which to evaluate the mixture
    Returns:
    A weighted combination of the MVN densities.'''
    mean_1 = [-1.0, -1.0]
    cov_1 = [[1.0, -0.8], [-0.8, 1.0]]
    mean_2 = [1, 2]
    cov_2 = [[1.5, 0.6], [0.6, 0.8]]
    g1 = multivariate_normal.pdf(sample, mean_1, cov_1)
    g2 = multivariate_normal.pdf(sample, mean_2, cov_2)
    return (0.6*g1 + 28.4*g2)/(0.6 + 28.4)
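# A quick sanity check of the mixture density above (a minimal sketch with
# assumed toy inputs; multivariate_normal.pdf is vectorized over rows, so
# proposal() accepts a single 2-D point or an (n, 2) array of points):
import numpy as np

print(proposal([0.0, 0.0]))                      # density at a single point
print(proposal(np.random.randn(1000, 2)).shape)  # one density per row: (1000,)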
Example 21
    def pdf(self, x=None):
        """Probability density function at state x.

        Will return a probability distribution relative to the shape of the
        input and the dimensionality of the normal. For example, if x is 5x2 
        with a 2-dimensional normal, pdf is 5x1; if x is 5x5x2 
        with a 2-dimensional normal, pdf is 5x5.
        """

        # Look over the whole state space
        if x is None:
            if not hasattr(self, 'pos'):
                self._discretize()
            x = self.pos

        # Ensure proper output shape
        x = np.atleast_1d(x)
        if self.ndims == x.shape[-1]:
            shape = x.shape[:-1]
        else:
            shape = x.shape

        pdf = np.zeros(shape)
        for i, weight in enumerate(self.weights):
            mean = self.means[i]
            covariance = self.covariances[i]
            gaussian_pdf = multivariate_normal.pdf(x, mean, covariance,
                                                   allow_singular=True)
            pdf += weight * gaussian_pdf

        return pdf
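# The shape behaviour described in the docstring mirrors scipy itself: the
# pdf is evaluated over any leading batch dimensions of x. A standalone
# sketch (assumed toy inputs) illustrating that claim:
import numpy as np
from scipy.stats import multivariate_normal

mean, cov = np.zeros(2), np.eye(2)
print(multivariate_normal.pdf(np.random.randn(5, 2), mean, cov).shape)     # (5,)
print(multivariate_normal.pdf(np.random.randn(5, 5, 2), mean, cov).shape)  # (5, 5)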
Example 22
def get_gaussian2D_pdf(data = None, xbins=10j, ybins=10j, mu = None, cov = None, 
                      std_K = 2, x_grid = None):
    ## Fit a gaussian to the data or use the parameters given.
    # Gives the gaussian set of points for the std_K
    if data is not None:
        mu = np.mean(data, axis=0)
        cov = np.cov(data, rowvar=False)
    mu = np.array(mu).flatten()
    std_1 = np.sqrt(cov[0, 0])
    std_2 = np.sqrt(cov[1, 1])

    if x_grid is None:
        xx, yy = np.mgrid[mu[0] - std_K*std_1:mu[0] + std_K*std_1:xbins,
                          mu[1] - std_K*std_2:mu[1] + std_K*std_2:ybins]

    # Function to obtain the 3D plot of the pdf of a 2D gaussian
    # create grid of sample locations (default: 100x100)

    xy_sample = np.vstack([xx.ravel(), yy.ravel()]).T
#    xy_train  = np.vstack([data[:,[1]].T, data[:,[0]].T]).T
    

    # score_samples() returns the log-likelihood of the samples
    z = multivariate_normal.pdf(xy_sample,mu,cov)
    return xx, yy, np.reshape(z, xx.shape)
Example 23
def est_gmm(data, K):
    N = len(data)   # data is a list of arrays
    M = data[0].shape[0]    # dimensionality of each data point
    gamma = [[0]*K for n in range(N)]
    pi = [1./N for k in range(K)]
    mu = [rand(M) for k in range(K)]    # TO BE IMPROVED
    sigma = [np.eye(M) for k in range(K)]
    for time in range(1000):
        print "Iteration\t#",time
        for n in range(N):  # E Step
            div = 0
            for k in range(K):
                gamma[n][k] = pi[k] * multivariate_normal.pdf(data[n].tolist(), 
                        mean=mu[k].tolist(), cov=sigma[k].tolist())
                div += gamma[n][k]
            for k in range(K):
                gamma[n][k] /= div
        nums = [sum(g[k] for g in gamma) for k in range(K)]  # responsibility totals per component
        for k in range(K):  # M Step
            mu[k] = np.zeros(M)
            for n in range(N):
                mu[k] += gamma[n][k] * np.array(data[n])
            mu[k] /= nums[k]
            sigma[k] = np.zeros((M, M))
            for n in range(N):
                delt = np.mat(np.array(data[n])-mu[k])
                sigma[k] += gamma[n][k] * delt.transpose()*delt
            sigma[k] /= nums[k]
            pi[k] = nums[k] / N
    return pi, mu, sigma
Example 24
 def update_xgivenz(self):
     for k in range(self.num_clusters):
         for i, row in self.data.iterrows():
             # (x - \mu)^T \hat \Sigma^{-1} (x - \mu)
             self.xgivenz[i, k] = mvn.pdf(row, mean=self.cluster_centers[k],
                                               cov= self.cluster_covs[k])
     return
def likelihood(mean_location, y, **dist_params):
	#default distribution is Gaussian with mean= mean_location, variance = sigma_square
	from scipy.stats import multivariate_normal

	llh = multivariate_normal.pdf(y, mean=mean_location, cov=dist_params["sigma_square"])

	return llh
	def proba(self, x, y): # x is a data point, y is a label
		cov_mtx, avg, tlen = self.fix( self.dict[y] )
		mean = avg
		cov = cov_mtx
		
		p = multivariate_normal.pdf(x, mean=mean, cov=cov)
		return p
def multivariateGaussian(X, mu, sigma2):
	""" Computes the probability density function of the
	    multivariate gaussian distribution.

	    Computes the probability density function of the examples X 
	    under the multivariate gaussian distribution with parameters
	    mu and Sigma2. If Sigma2 is a matrix, it is treated as the 
	    covariance matrix. If Sigma2 is a vector, it is treated
	    as the \sigma^2 values of the variances in each dimension
	    (a diagonal covariance matrix)
	"""

	# Sigma2 = np.array(sigma2)
	# k = len(mu)
	# if ( np.ndim(Sigma2) == 1 or Sigma2.shape[0] == 1 
	#  	or Sigma2.shape[1] == 1 ):
	#  	Sigma2 = np.diag(Sigma2)


	# Xn = X - mu
	# p = ( (2 * np.pi) ** (- k / 2) * linalg.det(Sigma2) ** (-0.5) 
	#   	 * np.exp(-0.5 * np.sum(np.dot(Xn, linalg.pinv(Sigma2) ) 
	#                               * Xn, axis=1)) )

	# p can be calculated very quickly using the following function
	# http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.stats.multivariate_normal.html
	p = multivariate_normal.pdf(X, mean=mu, cov=sigma2)
	return p
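# A short usage sketch (assumed toy data, not from the original project),
# illustrating the vector-versus-matrix handling of sigma2 described above:
import numpy as np

X_demo = np.random.randn(5, 2)
mu_demo = X_demo.mean(axis=0)
p_diag = multivariateGaussian(X_demo, mu_demo, X_demo.var(axis=0))            # vector -> diagonal covariance
p_full = multivariateGaussian(X_demo, mu_demo, np.cov(X_demo, rowvar=False))  # matrix -> full covariance
print(p_diag, p_full)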
Example 28
def test_R_values():
    # Compare the multivariate pdf with some values precomputed
    # in R version 3.0.1 (2013-05-16) on Mac OS X 10.6.

    # The values below were generated by the following R-script:
    # > library(mnormt)
    # > x <- seq(0, 2, length=5)
    # > y <- 3*x - 2
    # > z <- x + cos(y)
    # > mu <- c(1, 3, 2)
    # > Sigma <- matrix(c(1,2,0,2,5,0.5,0,0.5,3), 3, 3)
    # > r_pdf <- dmnorm(cbind(x,y,z), mu, Sigma)
    r_pdf = np.array([0.0002214706, 0.0013819953, 0.0049138692,
                      0.0103803050, 0.0140250800])

    x = np.linspace(0, 2, 5)
    y = 3 * x - 2
    z = x + np.cos(y)
    r = np.array([x, y, z]).T

    mean = np.array([1, 3, 2], 'd')
    cov = np.array([[1, 2, 0], [2, 5, .5], [0, .5, 3]], 'd')

    pdf = multivariate_normal.pdf(r, mean, cov)
    assert_allclose(pdf, r_pdf, atol=1e-10)
Example 29
def fit_rvc_cost(psi,w,Hd,K):
    import numpy as np
    from scipy.stats import multivariate_normal
    from sigmoid_function import sigmoid_function
    I=K[:,0].size
    # ensure that Hd is 1-D array
    Hd=np.reshape(Hd,[Hd.size,])
    Hd_diag=np.diag(Hd)
    # error in mvnpdf
    psi=psi.flatten()
    mvnpdf=multivariate_normal.pdf(psi,np.zeros([40,]),np.diag(1/Hd))
    L=I*(-1)*np.log(mvnpdf)
    psi=np.reshape(psi,[psi.size,1])
    g=I*np.dot(Hd_diag,psi)
    H=I*Hd_diag
    predictions=sigmoid_function(np.dot(psi.T,K))
    predictions=np.reshape(predictions,[predictions.size,1])
    for i in range(I):
        # update L
        y=predictions[i,0]
        if w[i]==1:
            L=L-np.log(y)
        else:
            L=L-np.log(1-y)

        # update g and H, debug here 2014/05/26
        K_temp=np.reshape(K[:,i],[K[:,i].size,1])
        g=g+(y-w[i])*K_temp
        H=H+y*(1-y)*np.dot(K_temp,K_temp.T)

    return L,g,H
Example 30
def fit_gmm(data, K=5, verbose=False):
    def likelihood(means, covariances, mixing_proportions):
        # Compute data log-likelihood for the given GMM parametrization
        densities = np.array([mvn.pdf(data, means[k], covariances[k]) for k in range(K)])
        unnormalized_responsibilities = densities * mixing_proportions
        return np.log(unnormalized_responsibilities.sum(axis=0)).sum()

    data = data.reshape(-1, data.shape[-1])

    N = data.shape[0]
    D = data.shape[1] # Dimension of the data points
    
    # Initialize the variables that are to be learned
    covariances = np.array([100 * np.eye(D) for k in range(K)]) # Covariance matrices
    mixing_proportions = np.ones([K, 1]) / K # Mixing proportions
    responsibilities = np.zeros([N, K])
    
    # Choose the initial centroids using k-means clustering
    kmeans = KMeans(n_clusters=K)
    kmeans = kmeans.fit(data)
    means = kmeans.cluster_centers_
    
    old_likelihood = likelihood(means, covariances, mixing_proportions)
    
    if verbose:
        print("Likelihood after intialization: {0:.2f}".format(old_likelihood))
        
    # Iterate until convergence
    it = 0
    converged = False
    while not converged:
        it += 1
        old_likelihood = likelihood(means, covariances, mixing_proportions)

        # Compute the responsibilities
        densities = np.array([mvn.pdf(data, means[k], covariances[k]) for k in range(K)])
        responsibilities = densities * mixing_proportions
        responsibilities = (responsibilities / responsibilities.sum(axis=0)).T

        # Update the distribution parameters
        resp_sums = responsibilities.sum(axis=0)
        means = responsibilities.T.dot(data)
        for k in range(K):
            means[k] /= resp_sums[k]
            covariances[k] = np.zeros(D)
            for n in range(N):
                centered = data[n, :] - means[k]
                covariances[k] += responsibilities[n, k] * np.outer(centered, centered)
            covariances[k] /= resp_sums[k]
            covariances[k] += 0.1 * np.eye(D) # To prevent singular matrices
        mixing_proportions = np.reshape(resp_sums / N, [K, 1])

        # Check for convergence
        new_likelihood = likelihood(means, covariances, mixing_proportions)
        delta = new_likelihood - old_likelihood
        converged = delta < np.abs(new_likelihood) * 1e-4
        if verbose:
            print("Iteration {0}, likelihood = {1:.2f}, delta = {2:.2f}".format(it, new_likelihood, delta))
            
    return (means, covariances, mixing_proportions)
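# A minimal usage sketch on synthetic data (assumes numpy as np, sklearn's
# KMeans and scipy.stats.multivariate_normal as mvn, as imported by the
# snippet's original module):
import numpy as np

np.random.seed(0)
demo_data = np.vstack([np.random.randn(200, 2),            # blob near [0, 0]
                       np.random.randn(200, 2) + [8, 8]])  # blob near [8, 8]
means, covariances, mixing_proportions = fit_gmm(demo_data, K=2, verbose=True)
print(means)               # roughly [0, 0] and [8, 8]
print(mixing_proportions)  # roughly [[0.5], [0.5]]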
Example 31
# Dimension of the target
d_theta = 2

# Set parameters of the target distribution
Z_pi = 100  # normalizing constant
rho_pi = np.ones(5) / 5  # mixture weights
# Mean of each mixand
mu_pi = np.array([[-10, -10], [0, 16], [13, 8], [-9, 7], [14, -14]])
# Covariance matrix of each mixand
sig_pi = np.array([[[2, 0.6], [0.6, 2]], [[2, -0.4], [-0.4, 2]],
                   [[2, 0.8], [0.8, 2]], [[3, 0.], [0., 0.5]],
                   [[2, -0.1], [-0.1, 2]]])

# Create a lambda which is the target probability distribution (2D, 5-mode target)
log_target = lambda theta: np.log(Z_pi) + np.log(rho_pi[0] * mvn.pdf(
    theta, mu_pi[0], sig_pi[0]) + rho_pi[1] * mvn.pdf(theta, mu_pi[1], sig_pi[
        1]) + rho_pi[2] * mvn.pdf(theta, mu_pi[2], sig_pi[2]) + rho_pi[
            3] * mvn.pdf(theta, mu_pi[3], sig_pi[3]) + rho_pi[4] * mvn.pdf(
                theta, mu_pi[4], sig_pi[4]))

# Compute true target mean
target_mean_true = np.average(mu_pi, axis=0, weights=rho_pi)

# Sampler parameters
samp_num = 200
iter_num = 500
mix_num = 25
samp_per_mix = int(np.floor(samp_num / mix_num))
var0 = 1

def globCoupChangepointsSetMove(data,
                                X,
                                y,
                                mu,
                                alpha_gamma_sigma_sqr,
                                beta_gamma_sigma_sqr,
                                lambda_sqr,
                                sigma_sqr,
                                pi,
                                numSamples,
                                it,
                                change_points,
                                method='',
                                delta_sqr=[]):
    '''
    Documentation TODO
    '''
    try:  # get the value of delta sqr
        curr_delta_sqr = delta_sqr[it + 1]
    except IndexError:  # we are in a method that does not require delta^2
        curr_delta_sqr = []

    # Select a random birth, death or relocate move
    randomInteger = randint(0, 2)

    # Changepoint moves selection
    validMove = True
    if randomInteger == 0:  # If the random integer is 0 then do a birth move
        newChangePoints = cpBirthMove(change_points, numSamples)
        if len(newChangePoints) > 9:
            validMove = False
        else:
            # Hastings ratio calculation
            hr = (numSamples - 1 - len(change_points)) / (len(newChangePoints))

    elif randomInteger == 1:  # do the death move
        try:
            newChangePoints = cpDeathMove(change_points)
        except ValueError:  # If the move fails we keep the current changepoints
            validMove = False
            newChangePoints = change_points
        # Hastings ratio calculation
        hr = (len(change_points)) / (numSamples - 1 - len(newChangePoints))

    else:  # do the relocation move
        try:
            newChangePoints = cpRellocationMove(change_points)
        except ValueError:  # If the move fails we keep the current changepoints
            validMove = False
            #newChangePoints = change_points
        # Hastings ratio calculation
        hr = 1
    if validMove:
        # Calculate the marginal likelihood of the current cps set
        logmarginalTau = calculateMarginalLikelihoodWithChangepoints(
            X, y, mu, alpha_gamma_sigma_sqr, beta_gamma_sigma_sqr,
            lambda_sqr[it + 1], numSamples, change_points, method,
            curr_delta_sqr)

        # Get the density of mu
        _, _, muDensity = muSampler(mu, change_points, X, y, sigma_sqr[it + 1],
                                    lambda_sqr[it + 1])

        # ---> Reconstruct the design ndArray, mu vector and parameters for the marginal likelihood calc
        # Select the data according to the set Pi
        partialData = selectData(data, pi)
        # Design ndArray
        XStar = constructNdArray(partialData, numSamples, newChangePoints)
        respVector = data['response'][
            'y']  # We have to partition y for each changepoint as well
        yStar = constructResponseNdArray(respVector, newChangePoints)
        # Mu matrix
        muDagger = constructMuMatrix(pi)

        # Mu matrix star matrix (new)
        muStar, muStarDensity, _ = muSampler(muDagger, newChangePoints, XStar,
                                             yStar, sigma_sqr[it + 1],
                                             lambda_sqr[it + 1])

        # After changes on the design matrix now we can calculate the modified marg likelihood
        # Calculate the marginal likelihood of the new cps set and new mu star
        logmarginalTauStar = calculateMarginalLikelihoodWithChangepoints(
            XStar, yStar, muStar, alpha_gamma_sigma_sqr, beta_gamma_sigma_sqr,
            lambda_sqr[it + 1], numSamples, newChangePoints, method,
            curr_delta_sqr)

        # Prior calculations for tau, tau*, mu, mu*
        # >>>>>>>>>>>>>>>>>>
        tauPrior = calculateChangePointsSetPrior(change_points)
        tauStarPrior = calculateChangePointsSetPrior(newChangePoints)

        # Calculate the prior probabilities for mu and mu*
        # TODO functionalize this calculations
        muDagger = np.zeros(mu.shape[0])
        muDaggerPlus = np.zeros(
            muStar.shape[0])  # we need this in order to calc the density
        sigmaDagger = np.eye(muDagger.shape[0])
        sigmaDaggerPlus = np.eye(muDaggerPlus.shape[0])
        muStarPrior = multivariate_normal.pdf(muStar.flatten(),
                                              mean=muDaggerPlus.flatten(),
                                              cov=sigmaDaggerPlus)
        muPrior = multivariate_normal.pdf(mu.flatten(),
                                          mean=muDagger.flatten(),
                                          cov=sigmaDagger)

        # Get the threshold of the probability of acceptance of the move
        acceptanceRatio = min(
            1, logmarginalTauStar - logmarginalTau + math.log(tauStarPrior) -
            math.log(tauPrior) + math.log(muStarPrior) - math.log(muPrior) +
            math.log(muDensity) - math.log(muStarDensity) + math.log(hr))
        # Get a sample from the U(0,1) to compare the acceptance ratio
        u = np.random.uniform(0, 1)
        if u < math.exp(acceptanceRatio):
            # if the sample is less than the acceptance ratio we accept the move to Tau* (the new cps)
            change_points = newChangePoints
            # also move to mu*
            mu = muStar

    return change_points, mu
Example 33
 def logLH(self):
     pdfs = np.zeros((self.n_points, self.n_clusters))
     for i in range(self.n_clusters):
         pdfs[:, i] = self.Pi[i] * multivariate_normal.pdf(
             self.data, self.Mu[i], np.diag(self.Var[i]))
     return np.mean(np.log(pdfs.sum(axis=1)))
Example 34
def pimais(log_target, d, D=10, N=5, I=200, var_prop=1, bounds=(-10, 10), K=1):
    """
    Runs the parallel interacting Markov adaptive importance sampling algorithm
    :param log_target: Logarithm of the target distribution
    :param d: Dimension of the sampling space
    :param D: Number of proposals
    :param N: Number of samples to draw per proposal
    :param I: Number of iterations
    :param K: Number of MCMC steps
    :param var_prop: Variance of each proposal distribution
    :param bounds: Prior to generate location parameters over [bounds]**d hypercube
    :return APISSampler object
    """
    # Determine the total number of particles
    M = D * N

    # Initialize the means of the mixture proposal
    mu = np.random.uniform(bounds[0], bounds[1], (D, d))

    # Initialize storage of particles and log weights
    particles = np.zeros((M * I, d))
    log_weights = np.ones(M * I) * (-np.inf)
    means = np.zeros((D * (I + 1), d))

    # Set initial locations to be the parents
    means[0:D] = mu

    # Initialize storage of evidence and target mean estimates
    evidence = np.zeros(I)
    target_mean = np.zeros((I, d))

    # Initialize the states of the D Markov chains
    chain = mu

    # Initialize start counter
    start = 0
    startd = 0

    # Loop for the algorithm
    for i in tqdm(range(I)):
        # Update start counter
        stop = start + M
        stopd = startd + D

        # Generate particles
        children = np.repeat(
            chain, N,
            axis=0) + np.sqrt(var_prop) * np.random.multivariate_normal(
                np.zeros(d), np.eye(d), M)
        particles[start:stop] = children

        # Compute the (equal-weight) mixture proposal density, then its log
        prop_j = np.zeros((M, D))
        prop = np.zeros(M)
        for j in range(D):
            prop_j[:, j] = mvn.pdf(children,
                                   mean=mu[j],
                                   cov=var_prop * np.eye(d),
                                   allow_singular=True)
            prop += prop_j[:, j] / D
        log_prop = np.log(prop)

        # Compute log weights and store
        log_target_eval = log_target(children)
        log_w = log_target_eval - log_prop
        log_weights[start:stop] = log_w

        # Compute estimate of evidence
        max_log_weight = np.max(log_weights[0:stop])
        weights = np.exp(log_weights[0:stop] - max_log_weight)
        log_z = np.log(1 /
                       (M *
                        (i + 1))) + max_log_weight + np.log(np.sum(weights))
        evidence[i] = np.exp(log_z)

        # Compute estimate of the target mean
        target_mean[i, :] = np.average(particles[0:stop, :],
                                       axis=0,
                                       weights=weights)

        # Adapt the parameters of the proposal distribution
        for j in range(D):
            # PART 1: Obtain target samples by sampling from a Markov chain
            z_old = chain[j]
            for k in range(K):
                # Propagation using Markov transition kernel
                z_star = np.random.multivariate_normal(z_old, np.eye(d))
                # Compute the acceptance probability (symmetric transition kernel)
                ap = np.exp(log_target(z_star) - log_target(z_old))
                # Check to see if the sample should be accepted
                if np.random.rand() < ap:
                    chain[j] = z_star
                    z_old = z_star

        # Store parameters
        means[startd + D:stopd + D] = chain

        # Update start counters
        start = stop
        startd = stopd

    # Generate output
    output = APISSampler(particles, log_weights, means, evidence, target_mean)

    return output
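# A short usage sketch against the 2-D mixture target defined earlier on this
# page (log_target, d_theta); the attribute names on the returned APISSampler
# object are assumed to expose the arrays passed to its constructor:
sampler = pimais(log_target, d=d_theta, D=10, N=5, I=100, var_prop=1)
print(sampler.evidence[-1])      # estimate of the normalizing constant (Z_pi = 100)
print(sampler.target_mean[-1])   # estimate of the target mean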
Example 35
def make_multimodal_samples(n_samples: int = 15,
                            *,
                            n_modes: int = 1,
                            points_per_dim: int = 100,
                            dim_domain: int = 1,
                            dim_codomain: int = 1,
                            start: float = -1,
                            stop: float = 1.,
                            std: float = .05,
                            mode_std: float = .02,
                            noise: float = .0,
                            modes_location=None,
                            random_state=None):
    r"""Generate multimodal samples.

    Each sample :math:`x_i(t)` is proportional to a gaussian mixture, generated
    as the sum of multiple pdf of multivariate normal distributions with
    different means.

    .. math::

        x_i(t) \propto \sum_{n=1}^{\text{n\_modes}} \exp \left (
        {-\frac{1}{2\sigma} (t-\mu_n)^T \mathbb{1} (t-\mu_n)} \right )

    Where :math:`\mu_n=\text{mode\_location}_n+\epsilon` and :math:`\epsilon`
    is normally distributed, with mean :math:`\mathbb{0}` and standard
    deviation given by the parameter `std`.

    Args:
        n_samples: Total number of samples.
        n_modes: Number of modes of each sample.
        points_per_dim: Points per sample. If the object is multidimensional
            indicates the number of points for each dimension in the domain.
            The sample will have :math:
            `\text{points_per_dim}^\text{dim_domain}` points of
            discretization.
        dim_domain: Number of dimensions of the domain.
        dim_codomain: Number of dimensions of the image
        start: Starting point of the samples. In multidimensional objects the
            starting point of each axis.
        stop: Ending point of the samples. In multidimensional objects the
            ending point of each axis.
        std: Standard deviation of the variation of the modes location.
        mode_std: Standard deviation :math:`\sigma` of each mode.
        noise: Standard deviation of Gaussian noise added to the data.
        modes_location:  List of coordinates of each mode.
        random_state: Random state.

    Returns:
        :class:`FDataGrid` object comprising all the samples.
    """

    random_state = sklearn.utils.check_random_state(random_state)

    if modes_location is None:

        location = make_multimodal_landmarks(n_samples=n_samples,
                                             n_modes=n_modes,
                                             dim_domain=dim_domain,
                                             dim_codomain=dim_codomain,
                                             start=start,
                                             stop=stop,
                                             std=std,
                                             random_state=random_state)

    else:
        location = np.asarray(modes_location)

        shape = (n_samples, dim_codomain, n_modes, dim_domain)
        location = location.reshape(shape)

    axis = np.linspace(start, stop, points_per_dim)

    if dim_domain == 1:
        sample_points = axis
        evaluation_grid = axis
    else:
        sample_points = np.repeat(axis[:, np.newaxis], dim_domain, axis=1).T

        meshgrid = np.meshgrid(*sample_points)

        evaluation_grid = np.empty(meshgrid[0].shape + (dim_domain, ))

        for i in range(dim_domain):
            evaluation_grid[..., i] = meshgrid[i]

    # Data matrix of the grid
    shape = (n_samples, ) + dim_domain * (points_per_dim, ) + (dim_codomain, )
    data_matrix = np.zeros(shape)

    # Covariance matrix of the samples
    cov = mode_std * np.eye(dim_domain)

    import itertools
    for i, j, k in itertools.product(range(n_samples), range(dim_codomain),
                                     range(n_modes)):
        data_matrix[i, ...,
                    j] += multivariate_normal.pdf(evaluation_grid,
                                                  location[i, j, k], cov)

    # Constant to make mode values approx. 1
    data_matrix *= (2 * np.pi * mode_std)**(dim_domain / 2)

    data_matrix += random_state.normal(0, noise, size=data_matrix.shape)

    return FDataGrid(sample_points=sample_points, data_matrix=data_matrix)
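# A brief usage sketch (assuming the scikit-fda module this function comes
# from, so that make_multimodal_landmarks and FDataGrid are available):
fd = make_multimodal_samples(n_samples=5, n_modes=2, std=0.05,
                             mode_std=0.02, random_state=0)
print(fd.data_matrix.shape)   # (5, 100, 1): 5 samples, 100 points, 1 codomain dim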
Example 36
 def __call__(self, theta):
     return multivariate_normal.pdf(theta, self._mu, self._sigma)
Example 37
def gaussian(x_n, mu, sigma, pi_c, conv):
    #extract number of classes and dimensions
    C_dim = np.array(pi_c.shape)
    C = C_dim[0]
    mu_dim = np.array(mu.shape)
    N_dim = np.array(x_n.shape)
    D = mu_dim[1]
    N = N_dim[0]
    llh1 = log_lh(x_n, mu, sigma, pi_c)
    log_array = np.array([0, llh1])
    diff_log = 1.0
    pic_new = pi_c
    mu_new = mu
    sig_new = sigma
    i = 0
    while diff_log > conv:  # Convergence condition
        #E-step
        y = np.zeros((N, C, D))
        y2 = np.zeros((N, C, D))
        rho = np.zeros((N, C))

        #Calculation of r[n][k] for each data point
        for n in range(N):
            x1 = x_n[n, :]
            den = 0
            den1 = 0
            for c in range(C):
                mu_n = mu[c, :]
                sigma_n = sigma[c, :]
                den1 = pi_c[c] * mn.pdf(x1, mu_n, sigma_n)
                den += den1
            for c in range(C):
                mu_n = mu[c, :]
                sigma_n = sigma[c, :]
                rho[n][c] = (pi_c[c] * mn.pdf(x1, mu_n, sigma_n)) / den
                y[n][c][:] = rho[n][c] * x1

        #M-step

        #print("rho = " ,rho)

        #sum over individual values of rho^n for each class
        k1 = (np.sum(rho, axis=0))
        k2 = (np.sum(y, axis=0))

        #print('k1 = ' ,k1)
        #print('k2 = ' ,k2)

        #Calculation of new Prior Probabilities and new Means
        for c in range(C):
            pic_new[c] = k1[c] / N
            mu_new[c, :] = k2[c, :] / k1[c]
        #print ("pic_new = " ,pic_new)
        #print ("mu_new = " ,mu_new)

        # Calculation of new Variances
        k3 = np.zeros((C, D, D), dtype=float)
        #print (k3.shape)
        for c in range(C):
            mu_n = mu_new[c, :]
            for n in range(N):
                x1 = x_n[n, :]
                k3[c, :, :] += rho[n][c] * np.outer((x1 - mu_n), (x1 - mu_n).T)
                #print('k3[c]', k3[c,:,:], rho[n][c], x1, mu_n)
            sig_new[c, :, :] = k3[c] / k1[c]
            #print('sigma c', sig_new[c])
        print(sig_new)
        i += 1
        llh_new = np.array([i, log_lh(x_n, mu_new, sig_new, pic_new)])

        log_array = np.append([log_array], [llh_new])
        diff_log = llh_new[1] - llh1
        llh1 = llh_new[1]

        pi_c = pic_new
        mu = mu_new
        sigma = sig_new
        #theta_old = theta_new
        #print('log_likelihood:', log_array)
    return (log_array, mu_new, sig_new, pic_new)
Example 38
File: GMM.py Project: Emin4545/GMM
def normal_i_k(mu, cova, vector):
    N = multivariate_normal.pdf(vector, mean=mu, cov=cova)
    return N
Example 39
def likelihood_adap_cov(distance_moduli, distance_moduli_mean):
    p = multivariate_normal.pdf(distance_moduli, distance_moduli_mean, cov_matrix)
    if p != 0:
        return np.log(p)
    else:
        return -np.inf
Example 40
su1 = 0
su2 = 0
pos1 = []
pos2 = []
#look at the pdf for classes
for i in range(totalclass):
    Train_class.append(classifytraindata(TrainPGC, labelPGC, i,
                                         dimension))  #Classify traindata
    Train_class_mean.append(estimatemean(Train_class[i]))  #Estimate mean
    Train_class_cov.append(estimatecov(
        Train_class[i]))  #Estimate full covariance matrix
    Train_class_var.append(np.diag(np.diag(estimatecov(
        Train_class[i]))))  #Estimate diagonal covariance matrix
    PC.append(estimatepc(Train_class[i], TrainPGC))
    y1.append(
        multivariate_normal.pdf(TestPGC,
                                mean=Train_class_mean[i],
                                cov=Train_class_cov[i],
                                allow_singular=True))
    y2.append(
        multivariate_normal.pdf(TestPGC,
                                mean=Train_class_mean[i],
                                cov=Train_class_var[i],
                                allow_singular=True))
""" ======================== Cross Validation ============================= """
""" Here you should test your parameters with validation data """
#Calculate posterior
for i in range(totalclass):
    su1 = su1 + PC[i] * y1[i]
    su2 = su2 + PC[i] * y2[i]
for i in range(totalclass):
    pos1.append(PC[i] * y1[i] / su1)
    pos2.append(PC[i] * y2[i] / su2)
Example 41
params, unsupervised_forecasts, unsupervised_posterior, unsupervised_loglikelihoods = run_em(
    d, 3)


# Generate grid points
def gen_grid(df, a):
    ron = rong(df)
    men = [st.mean(ron[1]), st.mean(ron[0])]
    dest = [men[0] - ron[1][0], men[1] - ron[0][0]]

    x, y = np.meshgrid(
        np.linspace(men[0] - a * dest[0], men[0] + dest[0] * a, 100),
        np.linspace(men[1] - a * dest[1], men[1] + dest[1] * a, 100))
    return np.column_stack([x.flat, y.flat]), x, y


# density values at the grid points
r, X, Y = gen_grid(dafp, 1.3)
Z = mvn.pdf(r, params['mu'][0], params['sigma'][0]).reshape(X.shape)

# arbitrary contour levels
contour_level = [0.1, 0.2, 0.3]

params, unsupervised_forecasts, unsupervised_posterior, unsupervised_loglikelihoods = REM(
    dafp, ['longitude', 'latitude'], 3)
for i in range(3):
    Z = mvn.pdf(r, params['mu'][i], params['sigma'][i]).reshape(X.shape)
    plt.contour(X, Y, Z, levels=contour_level)
plt.scatter(x=dafp.longitude, y=dafp.latitude, s=0.2)
Example 42
def cmpmc(log_target,
          d,
          D=10,
          M=50,
          I=200,
          K=5,
          var_prop=1,
          bounds=(-10, 10),
          alpha=2,
          eta_rho0=0,
          eta_mu0=1,
          eta_prec0=0.1,
          g_rho_max=0.1,
          g_mu_max=0.5,
          g_prec_max=0.25,
          Kthin=1,
          var_mcmc=1):
    """
    Runs the controlled mixture population Monte Carlo algorithm
    :param log_target: Logarithm of the target distribution
    :param d: Dimension of the sampling space
    :param D: Number of mixture components
    :param M: Number of samples to draw per iteration
    :param I: Number of iterations
    :param K: Number of MH steps at each iteration for each Markov chain
    :param var_prop: Initial variance of each proposal distribution
    :param bounds: Prior to generate mixture means over [bounds]**d hypercube
    :param alpha: Renyi divergence parameter (must be greater than 1)
    :param eta_rho0: Initial learning rate for mixture weights
    :param eta_mu0: Initial learning rate for mixture component means
    :param eta_prec0: Initial learning rate for mixture component precision matrix
    :param g_rho_max: Maximum norm of mixture weight gradient
    :param g_mu_max: Maximum norm of mixture mean gradient
    :param g_prec_max: Maximum norm of mixture precision matrix gradient
    :param Kthin: Thinning parameter for the MH algorithm
    :param var_mcmc: Perturbation variance of the MCMC state
    :return MPMCSampler object
    """
    # Initialize the weights of the mixture proposal
    rho = np.ones(D) / D

    # Initialize the means of the mixture proposal
    mu = np.random.uniform(bounds[0], bounds[1], (D, d))

    # Initialize the covariances/precisions of the mixture proposal
    sig = np.tile(var_prop * np.eye(d), (D, 1, 1))
    prec = np.tile((1 / var_prop) * np.eye(d), (D, 1, 1))

    # Initialize storage of particles and log weights
    particles = np.zeros((M * I, d))
    log_weights = np.ones(M * I) * (-np.inf)
    mix_weights = np.zeros((I + 1, D))
    means = np.zeros((D * (I + 1), d))
    covariances = np.tile(np.zeros((d, d)), (D * (I + 1), 1, 1))

    # Initialize storage of evidence and target mean estimates
    evidence = np.zeros(I)
    target_mean = np.zeros((I, d))

    # Initialize proposal parameters
    mix_weights[0, :] = rho
    means[0:D] = mu
    covariances[0:D] = sig

    # Initialize RMS prop parameters
    vrho = np.zeros(D)
    vmu = np.zeros((D, d))
    vprec = np.zeros((D, d, d))

    # Initialize the states of the D Markov chains
    chain = np.copy(mu)
    # Initialize start counter
    start = 0
    startd = 0

    # Loop for the algorithm
    for i in tqdm(range(I)):
        # Update start counter
        stop = start + M
        stopd = startd + D

        # Generate particles
        idx = np.random.choice(D, M, replace=True, p=rho)
        children = np.random.multivariate_normal(np.zeros(d), np.eye(d), M)
        for j in range(D):
            children[idx == j] = mu[j] + np.matmul(
                children[idx == j],
                np.linalg.cholesky(sig[j]).T)
        particles[start:stop] = children

        # Compute log proposal
        log_prop_j = np.zeros((M, D))
        for j in range(D):
            log_prop_j[:, j] = mvn.logpdf(children,
                                          mean=mu[j],
                                          cov=sig[j],
                                          allow_singular=True)
        # Find the maximum log weight
        max_log_weight = np.max(log_prop_j)
        # Determine the equivalent log proposal as
        log_prop = max_log_weight + np.log(
            np.average(
                np.exp(log_prop_j - max_log_weight), weights=rho, axis=1))

        # Compute log weights and store
        log_target_eval = log_target(children)
        log_w = log_target_eval - log_prop
        log_weights[start:stop] = log_w

        # Compute estimate of evidence
        max_log_weight = np.max(log_weights[0:stop])
        weights = np.exp(log_weights[0:stop] - max_log_weight)
        log_z = np.log(1 /
                       (M *
                        (i + 1))) + max_log_weight + np.log(np.sum(weights))
        evidence[i] = np.exp(log_z)

        # Compute estimate of the target mean
        target_mean[i] = np.average(particles[0:stop, :],
                                    axis=0,
                                    weights=weights)

        # Copy the parameters of the mixture
        rho_temp = np.copy(rho)
        mu_temp = np.copy(mu)
        sig_temp = np.copy(sig)
        # Adapt the parameters of the proposal distribution
        for j in range(D):
            # PART 1: Obtain target samples by sampling from a Markov chain
            mcmc_run = metropolis_hastings(log_target,
                                           chain[j],
                                           var_x=var_mcmc,
                                           T=K,
                                           burn_in=0,
                                           thinning_rate=Kthin)
            z_d = mcmc_run.samples

            # PART 2: Evaluate proposal and log proposal
            prop_z = np.zeros(np.shape(z_d)[0])
            prop_z_k = np.zeros((np.shape(z_d)[0], D))
            for jk in range(D):
                prop_z_k[:, jk] = mvn.pdf(z_d,
                                          mean=mu_temp[jk],
                                          cov=sig_temp[jk],
                                          allow_singular=True)
                prop_z += rho_temp[jk] * prop_z_k[:, jk]
            log_prop_z = np.log(prop_z)
            log_target_z = np.asarray([log_target(z_d)]).T

            # PART 3: Compute the gradient of mixand weights, mean, and  precision
            g_rho = 0  # initialize gradient of mixand weight
            g_mu = np.zeros(d)  # initialize gradient of mean
            g_prec = np.zeros(
                (d, d))  # initialize gradient of precision matrix
            # Use a loop to make sure you are doing it correctly
            for k in range(np.shape(z_d)[0]):
                factor = np.exp(
                    (alpha - 1) *
                    (log_target_z[k] - log_prop_z[k])) * (prop_z_k[k, j] /
                                                          prop_z[k])
                g_rho += factor
                temp_var = np.asmatrix((z_d[k] - mu[j])).T
                g_mu += factor * np.asarray(np.matmul(prec[j],
                                                      temp_var)).squeeze()
                g_prec += (1 / 2) * (sig_temp[j] - temp_var * temp_var.T)
            g_rho *= (1 - alpha) / np.shape(z_d)[0]
            g_mu *= (rho_temp[j] * (1 - alpha)) / np.shape(z_d)[0]
            g_prec *= (rho_temp[j] * (1 - alpha)) / np.shape(z_d)[0]

            # PART 4: Clip the stochastic gradients so they do not get too large
            if np.abs(g_rho) > g_rho_max:
                g_rho = g_rho * (g_rho_max / np.abs(g_rho))
            if np.linalg.norm(g_mu) > g_mu_max:
                g_mu = g_mu * (g_mu_max / np.linalg.norm(g_mu))
            if np.linalg.norm(g_prec) > g_prec_max:
                g_prec = g_prec * (g_prec_max / np.linalg.norm(g_prec))

            # PART 5: Taking the stochastic gradient step
            # Compute square of the gradient
            drho_sq = g_rho**2
            dmu_sq = g_mu**2
            dprec_sq = g_prec**2
            # Update elementwise learning rate parameters
            vrho[j] = 0.9 * vrho[j] + 0.1 * drho_sq
            vmu[j] = 0.9 * vmu[j] + 0.1 * dmu_sq
            vprec[j] = 0.9 * vprec[j] + 0.1 * dprec_sq
            # Compute the learning rates
            eta_rho = eta_rho0 * (np.sqrt(vrho[j]) + 1e-3)**(-1)
            eta_mu = eta_mu0 * (np.sqrt(vmu[j]) + 1e-3)**(-1)
            eta_prec = eta_prec0 * (np.sqrt(vprec[j]) + 1e-3)**(-1)
            # Make stochastic gradient updates
            if eta_rho0 > 0:
                rho[j] = rho[j] - eta_rho * g_rho
            if eta_mu0 > 0:
                mu[j] = mu[j] - eta_mu * g_mu
                chain[j] = mu[j]
            if eta_prec0 > 0:
                # Update and project the precision matrix onto the set of PSD matrices
                prec[j] = prec[j] - eta_prec * g_prec
                # Obtain the corresponding covariance matrix
                sig[j] = np.linalg.inv(prec[j])

        # Add small number to weights and project onto the simplex
        rho = other_funcs.projection_simplex_sort(rho + 1e-3)

        # Store parameters
        mix_weights[i + 1] = rho
        means[startd + D:stopd + D] = mu
        covariances[startd + D:stopd + D] = sig

        # Update start counters
        start = stop
        startd = stopd

    # Generate output
    output = MPMCSampler(particles, log_weights, mix_weights, means,
                         covariances, evidence, target_mean)

    return output
Esempio n. 43
0
 def gaussian(data, pixel):
     return mvn.pdf(data, mean=pixel, cov=spread)
Esempio n. 44
0
def likelihood(distance_moduli):
    p = multivariate_normal.pdf(distance_moduli, distance_moduli_data, cov_matrix)
    if p != 0:
        return np.log(p)
    else:
        return -np.inf
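The pdf-then-log pattern above returns -inf as soon as the density underflows to zero. A minimal log-space variant (a sketch only, assuming the same global `distance_moduli_data` and `cov_matrix` as above) uses scipy's `logpdf` directly:

def log_likelihood(distance_moduli):
    # logpdf never underflows the way pdf followed by np.log can
    return multivariate_normal.logpdf(distance_moduli,
                                      distance_moduli_data,
                                      cov_matrix)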
Esempio n. 45
0
 def __call__(self, theta):
     sigma = np.diag(self._std ** 2)
     return multivariate_normal.pdf(theta, self._mu, sigma)
Esempio n. 46
0
 def pdf(self, xs):
     return multivariate_normal.pdf(xs,
                                    self._mu,
                                    self._cov,
                                    allow_singular=True)
Esempio n. 47
0
 def __call__(self, theta):
     sigma = self._chol_sigma.dot(self._chol_sigma.T)
     return multivariate_normal.pdf(theta, self._mu, sigma)
Esempio n. 48
0
def mpmc(log_target, d, D=10, M=50, I=200, var_prop=1, bounds=(-10, 10)):
    """
    Runs the mixture population Monte Carlo algorithm
    :param log_target: Logarithm of the target distribution
    :param d: Dimension of the sampling space
    :param D: Number of proposals
    :param M: Number of samples to draw
    :param I: Number of iterations
    :param var_prop: Initial variance of each proposal distribution
    :param bounds: Prior to generate location parameters over [bounds]**d hypercube
    :return MPMCSampler object
    """
    # Initialize the weights of the mixture proposal
    rho = np.ones(D) / D

    # Initialize the means of the mixture proposal
    mu = np.random.uniform(bounds[0], bounds[1], (D, d))

    # Initialize the covariances of the mixture proposal
    sig = np.tile(var_prop * np.eye(d), (D, 1, 1))

    # Initialize storage of particles and log weights
    particles = np.zeros((M * I, d))
    log_weights = np.ones(M * I) * (-np.inf)
    mix_weights = np.zeros((I + 1, D))
    means = np.zeros((D * (I + 1), d))
    covariances = np.tile(np.zeros((d, d)), (D * (I + 1), 1, 1))

    # Set initial locations to be the parents
    mix_weights[0, :] = rho
    means[0:D] = mu
    covariances[0:D] = sig

    # Initialize storage of evidence and target mean estimates
    evidence = np.zeros(I)
    target_mean = np.zeros((I, d))

    # Initialize start counter
    start = 0
    startd = 0

    # Loop for the algorithm
    for i in tqdm(range(I)):
        # Update start counter
        stop = start + M
        stopd = startd + D

        # Generate particles
        idx = np.random.choice(D, M, replace=True, p=rho)
        children = np.random.multivariate_normal(np.zeros(d), np.eye(d), M)
        for j in range(D):
            children[idx == j] = mu[j] + np.matmul(
                children[idx == j],
                np.linalg.cholesky(sig[j]).T)
        particles[start:stop] = children

        # Compute log proposal
        prop = np.zeros(M)
        for j in range(D):
            prop += rho[j] * mvn.pdf(
                children, mean=mu[j], cov=sig[j], allow_singular=True)
        log_prop = np.log(prop)

        # Compute log weights and store
        log_w = log_target(children) - log_prop
        log_weights[start:stop] = log_w

        # Convert log weights to standard weights using LSE and normalize
        w = np.exp(log_w - np.max(log_w))
        w = w / np.sum(w)

        # Compute estimate of evidence
        max_log_weight = np.max(log_weights[0:stop])
        weights = np.exp(log_weights[0:stop] - max_log_weight)
        log_z = np.log(1 /
                       (M *
                        (i + 1))) + max_log_weight + np.log(np.sum(weights))
        evidence[i] = np.exp(log_z)

        # Compute estimate of the target mean
        target_mean[i, :] = np.average(particles[0:stop, :],
                                       axis=0,
                                       weights=weights)

        # Adapt the parameters of the proposal distribution
        for j in range(D):
            # Compute the RB factor
            alpha = rho[j] * mvn.pdf(children, mean=mu[j, :],
                                     cov=sig[j, :, :]) / prop + 1e-6
            # Update the weight
            rho[j] = np.sum(w * alpha)
            # Compute normalized weights
            wn = w * alpha / rho[j]
            # Update the proposal mean
            mu[j] = np.average(children, axis=0, weights=wn)
            # Update the proposal covariance (add small number to ensure positive definiteness)
            sig[j] = np.cov(children, rowvar=False, bias=True,
                            aweights=wn) + 1e-6 * np.eye(d)

        # Normalize the mixand weights
        rho = rho / np.sum(rho)

        # Store parameters
        mix_weights[i + 1] = rho
        means[startd + D:stopd + D] = mu
        covariances[startd + D:stopd + D] = sig

        # Update start counters
        start = stop
        startd = stopd

    # Generate output
    output = MPMCSampler(particles, log_weights, mix_weights, means,
                         covariances, evidence, target_mean)

    return output
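A usage sketch for `mpmc` (not part of the original example; the attribute names on the returned `MPMCSampler` object are assumed to match the constructor arguments): with a standard 2-D Gaussian target, the estimated evidence should approach 1 and the estimated target mean should approach the origin.

def log_target(x):
    # log-density of a standard 2-D Gaussian, evaluated row-wise
    return mvn.logpdf(x, mean=np.zeros(2), cov=np.eye(2))

sampler = mpmc(log_target, d=2, D=5, M=50, I=100)
print(sampler.evidence[-1])      # should be close to 1.0
print(sampler.target_mean[-1])   # should be close to [0, 0]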
Esempio n. 49
0
def test_rangeratemodels(h, modelclass, state_vec, ndim_state, pos_mapping,
                         vel_mapping, noise_covar, position, orientation):
    """ Test for the CartesianToBearingRangeRate and
    CartesianToElevationBearingRangeRate Measurement Models """

    state = State(state_vec)

    # Check default translation_offset, rotation_offset and velocity is applied
    model_test = modelclass(ndim_state=ndim_state,
                            mapping=pos_mapping,
                            velocity_mapping=vel_mapping,
                            noise_covar=noise_covar)

    assert len(model_test.translation_offset) == 3
    assert len(model_test.rotation_offset) == 3
    assert len(model_test.velocity) == 3

    # Create and a measurement model object
    model = modelclass(ndim_state=ndim_state,
                       mapping=pos_mapping,
                       velocity_mapping=vel_mapping,
                       noise_covar=noise_covar,
                       translation_offset=position,
                       rotation_offset=orientation)

    # Project a state through the model
    # (without noise)
    meas_pred_wo_noise = model.function(state)
    eval_m = h(state_vec, model.mapping, model.velocity_mapping,
               model.translation_offset, model.rotation_offset, model.velocity)
    assert np.array_equal(meas_pred_wo_noise, eval_m)

    # Ensure ```lg.transfer_function()``` returns H
    def fun(x):
        return model.function(x)

    H = compute_jac(fun, state)
    assert np.array_equal(H, model.jacobian(state))

    # Check Jacobian has proper dimensions
    assert H.shape == (model.ndim_meas, ndim_state)

    # Ensure inverse function returns original
    if isinstance(model, ReversibleModel):
        J = model.inverse_function(State(meas_pred_wo_noise))
        assert np.allclose(J, state_vec)

    # Ensure ```lg.covar()``` returns R
    assert np.array_equal(noise_covar, model.covar())

    # Ensure model creates noise
    rvs = model.rvs()
    assert rvs.shape == (model.ndim_meas, 1)
    assert isinstance(rvs, StateVector)
    rvs = model.rvs(10)
    assert rvs.shape == (model.ndim_meas, 10)
    assert isinstance(rvs, StateVectors)
    # StateVector is subclass of Matrix, so need to check explicitly.
    assert not isinstance(rvs, StateVector)

    # Project a state through the model
    # (without noise)
    meas_pred_wo_noise = model.function(state)
    assert np.array_equal(
        meas_pred_wo_noise,
        h(state_vec, model.mapping, model.velocity_mapping,
          model.translation_offset, model.rotation_offset, model.velocity))

    # Evaluate the likelihood of the predicted measurement, given the state
    # (without noise)
    prob = model.pdf(State(meas_pred_wo_noise), state)
    assert approx(prob) == multivariate_normal.pdf(
        (meas_pred_wo_noise -
         h(state_vec, model.mapping, model.velocity_mapping,
           model.translation_offset, model.rotation_offset,
           model.velocity)).ravel(),
        cov=noise_covar)

    # Propagate a state vector through the model
    # (with internal noise)
    meas_pred_w_inoise = model.function(state, noise=True)
    assert not np.array_equal(
        meas_pred_w_inoise,
        h(state_vec, model.mapping, model.velocity_mapping,
          model.translation_offset, model.rotation_offset, model.velocity))

    # Evaluate the likelihood of the predicted state, given the prior
    # (with noise)
    prob = model.pdf(State(meas_pred_w_inoise), state)
    assert approx(prob) == multivariate_normal.pdf(
        (meas_pred_w_inoise -
         h(state_vec, model.mapping, model.velocity_mapping,
           model.translation_offset, model.rotation_offset,
           model.velocity)).ravel(),
        cov=noise_covar)

    # Propagate a state vector through the model
    # (with external noise)
    noise = model.rvs()
    meas_pred_w_enoise = model.function(state, noise=noise)
    assert np.array_equal(
        meas_pred_w_enoise,
        h(state_vec, model.mapping, model.velocity_mapping,
          model.translation_offset, model.rotation_offset, model.velocity) +
        noise)

    # Evaluate the likelihood of the predicted state, given the prior
    # (with noise)
    prob = model.pdf(State(meas_pred_w_enoise), state)
    assert approx(prob) == multivariate_normal.pdf(
        (meas_pred_w_enoise -
         h(state_vec, model.mapping, model.velocity_mapping,
           model.translation_offset, model.rotation_offset,
           model.velocity)).ravel(),
        cov=noise_covar)
Esempio n. 50
0
def apis(log_target, d, D=10, N=5, I=200, var_prop=1, bounds=(-10, 10)):
    """
    Runs the adaptive population importance sampling algorithm
    :param log_target: Logarithm of the target distribution
    :param d: Dimension of the sampling space
    :param D: Number of proposals
    :param N: Number of samples to draw per proposal
    :param I: Number of iterations
    :param var_prop: Variance of each proposal distribution
    :param bounds: Prior to generate location parameters over [bounds]**d hypercube
    :return APISSampler object
    """
    # Determine the total number of particles
    M = D * N

    # Initialize the means of the mixture proposal
    mu = np.random.uniform(bounds[0], bounds[1], (D, d))

    # Initialize storage of particles and log weights
    particles = np.zeros((M * I, d))
    log_weights = np.ones(M * I) * (-np.inf)
    means = np.zeros((D * (I + 1), d))

    # Set initial locations to be the parents
    means[0:D] = mu

    # Initialize storage of evidence and target mean estimates
    evidence = np.zeros(I)
    target_mean = np.zeros((I, d))

    # Initialize start counter
    start = 0
    startd = 0

    # Loop for the algorithm
    for i in tqdm(range(I)):
        # Update start counter
        stop = start + M
        stopd = startd + D

        # Generate particles
        children = np.repeat(
            mu, N, axis=0) + np.sqrt(var_prop) * np.random.multivariate_normal(
                np.zeros(d), np.eye(d), M)
        particles[start:stop] = children

        # Compute the per-proposal densities and the log mixture proposal
        prop_j = np.zeros((M, D))
        prop = np.zeros(M)
        for j in range(D):
            prop_j[:, j] = mvn.pdf(children,
                                   mean=mu[j],
                                   cov=var_prop * np.eye(d),
                                   allow_singular=True)
            prop += prop_j[:, j] / D
        log_prop = np.log(prop)

        # Compute log weights and store
        log_target_eval = log_target(children)
        log_w = log_target_eval - log_prop
        log_weights[start:stop] = log_w

        # Compute estimate of evidence
        max_log_weight = np.max(log_weights[0:stop])
        weights = np.exp(log_weights[0:stop] - max_log_weight)
        log_z = np.log(1 /
                       (M *
                        (i + 1))) + max_log_weight + np.log(np.sum(weights))
        evidence[i] = np.exp(log_z)

        # Compute estimate of the target mean
        target_mean[i, :] = np.average(particles[0:stop, :],
                                       axis=0,
                                       weights=weights)

        # Adapt the parameters of the proposal distribution
        start_j = 0
        for j in range(D):
            # Update stop parameter
            stop_j = start_j + N
            # Get local children
            children_j = children[start_j:stop_j]
            # Obtain local log weights (log target minus log of proposal j)
            log_wj = log_target_eval[start_j:stop_j] - np.log(
                prop_j[start_j:stop_j, j])
            # Convert to weights using LSE
            wj = np.exp(log_wj - np.max(log_wj))
            # Normalize the weights
            wjn = wj / np.sum(wj)
            # Update the proposal mean
            mu[j] = np.average(children_j, axis=0, weights=wjn)
            # Update start parameter
            start_j = stop_j

        # Store parameters
        means[startd + D:stopd + D] = mu

        # Update start counters
        start = stop
        startd = stopd

    # Generate output
    output = APISSampler(particles, log_weights, means, evidence, target_mean)

    return output
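A usage sketch for `apis` (an assumption, not part of the original example): with a two-mode target the D location parameters should split between the modes as they adapt.

def log_target(x):
    # equal-weight mixture of two unit-covariance Gaussians at (-3, -3) and (3, 3)
    comp1 = mvn.logpdf(x, mean=-3 * np.ones(2), cov=np.eye(2))
    comp2 = mvn.logpdf(x, mean=3 * np.ones(2), cov=np.eye(2))
    return np.log(0.5) + np.logaddexp(comp1, comp2)

sampler = apis(log_target, d=2, D=10, N=5, I=100)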
    def LaserCallback(self, data):

        now = time.time()

        if not self.init_laser_flag:
            self.laser_rad = np.arange(data.angle_min, data.angle_max,
                                       data.angle_increment)
            if len(self.laser_rad) != len(data.ranges):
                # In an edge case, the length of 'data.ranges' may differ
                # from that of 'self.laser_rad'.
                # This is due to floating-point error in how the
                # interval end is handled.
                # For example, the length of 'np.arange(1, 3, 0.5)' is 4 and
                # that of 'np.arange(1, 3+1.0e-6, 0.5)' is 5.
                #
                # In such a case, 'self.laser_rad' should be fixed
                # because this script assumes that 'data.ranges' and
                # 'self.laser_rad' have the same length.
                #
                num = len(data.ranges)
                inc = (data.angle_max - data.angle_min) / (num - 1)
                epsilon = 1.0e-6
                self.laser_rad = np.arange(data.angle_min,
                                           data.angle_max + epsilon, inc)
            #
            self.laser_rad_cos = np.cos(self.laser_rad)
            self.laser_rad_sin = np.sin(self.laser_rad)
            self.laser_center = len(data.ranges) / 2 + 1
            self.laser_size = len(data.ranges)
            self.init_laser_flag = True
            self.deg_list = np.empty(0)
            self.raw_list = np.empty(0)
        laser_raw = np.array(data.ranges)

        # clustering
        cluster_num, cluster = self.clustring_laser_info_by_distance_new(
            laser_raw)
        # new_cluster_num,new_cluster = self.clustring_laser_info_by_distance_new2(laser_raw)
        # assert(cluster_num == new_cluster_num)
        # assert(np.all(cluster == new_cluster))

        means = np.empty((cluster_num - 1, 2))
        covs = np.empty((cluster_num - 1, 2))
        for i in xrange(1, cluster_num):
            means[i - 1] = np.mean(zip(self.laser_rad[cluster == i],
                                       laser_raw[cluster == i]),
                                   axis=0)
            covs[i - 1] = np.var(zip(self.laser_rad[cluster == i],
                                     laser_raw[cluster == i]),
                                 axis=0)

        # random forest
        try:
            prediction = self.random_forest_for_laser_info_category(
                cluster_num, laser_raw, cluster, means, covs)
        except Exception as e:
            return

        # particle filter
        if not self.init_particle_flag:
            self.particle = np.empty((self.PARTICLE_NUM, 3))
            self.particle[:, 0] = np.zeros(self.PARTICLE_NUM)
            self.particle[:, 1] = np.ones(self.PARTICLE_NUM) * 1.0
            self.particle[:, 0] += (np.random.random(self.PARTICLE_NUM) *
                                    self.PARTICLE_RAD_PARAM * 2 -
                                    self.PARTICLE_RAD_PARAM)
            self.particle[:, 1] += (np.random.random(self.PARTICLE_NUM) *
                                    self.PARTICLE_RAW_PARAM * 2 -
                                    self.PARTICLE_RAW_PARAM)
            self.particle[:,
                          2] = np.ones(self.PARTICLE_NUM) / self.PARTICLE_NUM

            self.init_particle_flag = True
        else:
            self.particle[:, 0] += (np.random.random(self.PARTICLE_NUM) *
                                    self.PARTICLE_RAD_PARAM * 2 -
                                    self.PARTICLE_RAD_PARAM)
            self.particle[:, 1] += (np.random.random(self.PARTICLE_NUM) *
                                    self.PARTICLE_RAW_PARAM * 2 -
                                    self.PARTICLE_RAW_PARAM)

        pdf_lookup_table = np.empty((len(self.particle), cluster_num - 1))
        for j in xrange(1, cluster_num):
            pdf_lookup_table[:, j - 1] = multivariate_normal.pdf(
                self.particle[:, 0:2], means[j - 1], covs[j - 1])
        if prediction[:, 1].max() > self.HUMAN_PROBA_THR:
            prediction[prediction[:, 1] <= self.HUMAN_PROBA_THR, 1] = 0.0
            pps = (pdf_lookup_table * prediction[:, 1]).sum(axis=1)
        else:
            pps = pdf_lookup_table.sum(axis=1)
        self.particle[:, 2] += pps
        self.particle[:, 2] = self.particle[:, 2] / np.sum(self.particle[:, 2])

        index = np.random.choice(np.arange(self.PARTICLE_NUM),
                                 self.PARTICLE_NUM,
                                 p=self.particle[:, 2])
        self.particle = self.particle[index]

        # smoothing
        self.deg_list = np.append(
            self.deg_list,
            np.sum(self.particle[:, 0] * self.particle[:, 2]) /
            np.sum(self.particle[:, 2]))
        self.raw_list = np.append(
            self.raw_list,
            np.sum(self.particle[:, 1] * self.particle[:, 2]) /
            np.sum(self.particle[:, 2]))
        if len(self.deg_list) > self.SMOOTHING_LEN:
            self.deg_list = np.delete(self.deg_list, 0)
            self.raw_list = np.delete(self.raw_list, 0)
        smoothing_list = np.arange(len(self.deg_list))
        deg = np.sum(self.deg_list * smoothing_list) / np.sum(smoothing_list)
        raw = np.sum(self.raw_list * smoothing_list) / np.sum(smoothing_list)

        br = tf.TransformBroadcaster()
        #br.sendTransform((np.cos(deg)*raw,np.sin(deg)*raw,0), (0.0, 0.0, 0.0, 1.0), rospy.Time.now(), "trace_target", "base_range_sensor_link")

        # rviz
        if not self.OUTPUT_RVIZ: return
        hoge = PointCloud()
        hoge.header = data.header
        for x in xrange(1, cluster_num):
            p = Point()
            p.x = np.cos(means[x - 1, 0]) * means[x - 1, 1]
            p.y = np.sin(means[x - 1, 0]) * means[x - 1, 1]
            p.z = 0
            hoge.points.append(p)
        self.pub_rviz_mean.publish(hoge)
        hoge = PointCloud()
        hoge.header = data.header
        for x in xrange(self.PARTICLE_NUM):
            p = Point()
            p.x = np.cos(self.particle[x][0]) * self.particle[x][1]
            p.y = np.sin(self.particle[x][0]) * self.particle[x][1]
            p.z = 0
            hoge.points.append(p)
        self.pub_rviz_particle.publish(hoge)
        hoge = PointStamped()
        hoge.header = data.header
        hoge.point.x = np.cos(deg) * raw
        hoge.point.y = np.sin(deg) * raw
        self.pub_rviz_point.publish(hoge)
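The measurement update above weights each particle by a sum of cluster Gaussians, renormalises, and resamples with `np.random.choice`. A self-contained sketch of that pattern (illustrative values only, not the original class):

import numpy as np
from scipy.stats import multivariate_normal

particles = np.random.randn(100, 2)            # (angle, range) hypotheses
weights = np.full(100, 1.0 / 100)              # uniform initial weights
meas_mean = np.array([0.1, 1.0])               # one detected cluster
meas_cov = np.diag([0.05, 0.05])

# bump each weight by the measurement likelihood, renormalise, resample
weights += multivariate_normal.pdf(particles, meas_mean, meas_cov)
weights /= weights.sum()
idx = np.random.choice(len(particles), len(particles), p=weights)
particles = particles[idx]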
def globCoupFeatureSetMoveWithChangePoints(data,
                                           X,
                                           y,
                                           mu,
                                           alpha_gamma_sigma_sqr,
                                           beta_gamma_sigma_sqr,
                                           lambda_sqr,
                                           sigma_sqr,
                                           pi,
                                           fanInRestriction,
                                           featureDimensionSpace,
                                           numSamples,
                                           it,
                                           change_points,
                                           method='',
                                           delta_sqr=[]):
    '''
    Documentation is missing for this function.
    '''
    # TODO fix this redundant code delta param is not used here
    try:  # get the value of the current delta
        curr_delta_sqr = delta_sqr[it + 1]
    except IndexError:  # we are not in a method that requires delta^2
        curr_delta_sqr = []

    # Get the possible features set
    possibleFeaturesSet = list(data['features'].keys())
    possibleFeaturesSet = [
        int(x.replace('X', '')) for x in possibleFeaturesSet
    ]

    # Select a random add, delete or exchange move
    randomInteger = randint(0, 2)
    # Do the calculation according to the randomly selected move
    validMove = True
    if randomInteger == 0:
        # Add move
        try:
            piStar = addMove(pi, featureDimensionSpace, fanInRestriction,
                             possibleFeaturesSet)
            hr = (featureDimensionSpace - len(pi)) / len(
                piStar)  # HR calculation
        except ValueError:
            validMove = False
            piStar = pi
            hr = 0
    elif randomInteger == 1:
        # Delete Move
        try:
            piStar = deleteMove(pi, featureDimensionSpace, fanInRestriction,
                                possibleFeaturesSet)
            hr = len(pi) / (featureDimensionSpace - len(piStar)
                            )  # HR calculation
        except ValueError:
            validMove = False
            piStar = pi
            hr = 0
    elif randomInteger == 2:
        # Exchange move
        try:
            piStar = exchangeMove(pi, featureDimensionSpace, fanInRestriction,
                                  possibleFeaturesSet)
            hr = 1
        except ValueError:
            validMove = False
            piStar = pi
            hr = 0
    if validMove:
        # Construct the new X, mu
        partialData = {'features': {}, 'response': {}}
        for feature in piStar:
            currKey = 'X' + str(int(feature))
            partialData['features'][currKey] = data['features'][currKey]

        # Design Matrix Design tensor? Design NdArray?
        XStar = constructNdArray(partialData, numSamples, change_points)
        muDagger = constructMuMatrix(piStar)
        # Mu matrix star matrix (new)
        muStar, muStarDensity, _ = muSampler(muDagger, change_points, XStar, y,
                                             sigma_sqr[it + 1],
                                             lambda_sqr[it + 1])

        # Calculate marginal likelihood for PiStar
        logmarginalPiStar = calculateMarginalLikelihoodWithChangepoints(
            XStar, y, muStar, alpha_gamma_sigma_sqr, beta_gamma_sigma_sqr,
            lambda_sqr[it + 1], numSamples, change_points, method,
            curr_delta_sqr)
        # Calculate marginal likelihood for Pi
        logmarginalPi = calculateMarginalLikelihoodWithChangepoints(
            X, y, mu, alpha_gamma_sigma_sqr, beta_gamma_sigma_sqr,
            lambda_sqr[it + 1], numSamples, change_points, method,
            curr_delta_sqr)

        # Calculate mu density
        _, _, muDensity = muSampler(mu, change_points, X, y, sigma_sqr[it + 1],
                                    lambda_sqr[it + 1])

        # Calculate the prior probabilities of the move Pi -> Pi*
        piPrior = calculateFeatureSetPriorProb(pi, featureDimensionSpace,
                                               fanInRestriction)
        logpiPrior = math.log(piPrior)
        piStarPrior = calculateFeatureSetPriorProb(piStar,
                                                   featureDimensionSpace,
                                                   fanInRestriction)
        logpiStarPrior = math.log(piStarPrior)

        # Calculate the prior probabilities for mu and mu*
        # TODO functionalize these calculations
        muDagger = np.zeros(mu.shape[0])
        muDaggerPlus = np.zeros(
            muStar.shape[0])  # we need this in order to calc the density
        sigmaDagger = np.eye(muDagger.shape[0])
        sigmaDaggerPlus = np.eye(muDaggerPlus.shape[0])
        muStarPrior = multivariate_normal.pdf(muStar.flatten(),
                                              mean=muDaggerPlus.flatten(),
                                              cov=sigmaDaggerPlus)
        muPrior = multivariate_normal.pdf(mu.flatten(),
                                          mean=muDagger.flatten(),
                                          cov=sigmaDagger)

        # Calculate the final acceptance probability A(~) TODO this should be in log
        # terms to avoid underflow with very low densities!!!
        acceptanceRatio = min(
            1, logmarginalPiStar - logmarginalPi + logpiStarPrior -
            logpiPrior + math.log(muDensity) - math.log(muStarDensity) +
            math.log(muStarPrior) - math.log(muPrior) + math.log(hr))

        # Get a sample from the U(0,1) to compare the acceptance ratio
        u = np.random.uniform(0, 1)
        if u < math.exp(acceptanceRatio):
            # if the sample is less than the acceptance ratio we accept the move to Pi*
            X = XStar
            pi = piStar
            mu = muStar

    return pi, mu, X
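The TODO about computing the acceptance ratio in log terms is easy to motivate: once a density underflows to 0.0, `math.log` raises, whereas `logpdf` stays finite. A small self-contained illustration (not part of the original function):

import numpy as np
from scipy.stats import multivariate_normal

x = 50 * np.ones(20)
mu0 = np.zeros(20)
print(multivariate_normal.pdf(x, mean=mu0, cov=np.eye(20)))     # 0.0 (underflow)
print(multivariate_normal.logpdf(x, mean=mu0, cov=np.eye(20)))  # about -25018.4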
Esempio n. 53
0
	def train(self):
		# Evaluate untrained policy
		self.evaluations = [evaluate_policy(self.env, self.policy, self.args)]
		self.log_dir = '{}/{}/{}_{}_seed_{}'.format(self.result_path, self.args.log_path,
													self.args.policy_name, self.args.env_name,
													self.args.seed)
		
		print("---------------------------------------")
		print("Settings: %s" % self.log_dir)
		print("---------------------------------------")
		if not os.path.exists(self.log_dir):
			os.makedirs(self.log_dir)
		
		if self.args.load_policy:
			self.policy.load(self.file_name + str(self.args.load_policy_idx), self.log_dir)
		
		# TensorboardX
		if self.args.evaluate_Q_value:
			self.writer_train = SummaryWriter(logdir=self.log_dir + '_train')
		self.writer_test = SummaryWriter(logdir=self.log_dir)
		self.pbar = tqdm(total=self.args.max_timesteps, initial=self.total_timesteps, position=0, leave=True)
		
		done = True
		self.cumulative_reward = 0.
		self.steps_done = 0
		option_data = []
		
		# cum_obs, cum_new_obs, cum_action, cum_option, cum_next_option, cum_reward, done_bool = [], [], [], [], [], [], [], []
		while self.total_timesteps < self.args.max_timesteps:
			# ================ Train =============================================#
			self.train_once()
			
			if done:
				self.eval_once()
				self.reset()
				done = False
			
			# Select action randomly or according to policy
			if self.total_timesteps < self.args.start_timesteps:
				action = self.env.action_space.sample()
				p = 1
				self.option = np.random.randint(self.args.option_num)
				self.next_option = np.random.randint(self.args.option_num)
			else:
				if 'RNN' in self.args.policy_name:
					action = self.policy.select_action(np.array(self.obs_vec))
				elif 'SAC' in self.args.policy_name:
					action = self.policy.select_action(np.array(self.obs), eval=False)
				elif 'HRLACOP' == self.args.policy_name:
					EPS_START = 0.9
					EPS_END = 0.05
					EPS_DECAY = self.args.max_timesteps
					# change option and calculate reward
					if (self.total_timesteps > self.args.start_timesteps) and (
							self.total_timesteps % self.args.option_change == 0):
						# print(self.total_timesteps)
						# change option every K steps ::::::
						sample = random.random()
						eps_threshold = EPS_END + (EPS_START - EPS_END) * \
										math.exp(-1. * self.total_timesteps / EPS_DECAY)
						self.steps_done += 1
						self.next_high_obs = self.obs
						
						if sample > eps_threshold:
							# option, _, _ = self.policy.softmax_option_target([np.array(self.obs)])
							# self.next_option = option.cpu().data.numpy().flatten()[0]
							action, self.option = self.policy.select_action(np.array(self.obs),
																			self.option,
																			change_option=True)
						else:
							self.option = np.random.randint(self.args.option_num)
						
						self.replay_buffer_high.add(
							(self.high_obs, self.next_high_obs, self.option, self.next_option, self.cumulative_reward))
						self.high_obs = self.next_high_obs
						
						self.auxiliary_reward = self.cumulative_reward / self.args.option_change
						option_data = np.array(option_data)
						option_data[:, -2] = self.auxiliary_reward
						for i in range(len(option_data)):
							self.replay_buffer_low.add(option_data[i])
						option_data = []
						
						self.cumulative_reward = 0.
					else:
						action, self.option = self.policy.select_action(np.array(self.obs),
																		self.option,
																		change_option=False)
				else:
					action = self.policy.select_action(np.array(self.obs))
				
				# exploration noise
				noise = np.random.normal(0,
										 self.args.expl_noise,
										 size=self.env.action_space.shape[0])
				if self.args.expl_noise != 0:
					action = (action + noise).clip(
						self.env.action_space.low, self.env.action_space.high)
				
				if 'HRLAC' == self.args.policy_name:
					p_noise = multivariate_normal.pdf(
						noise, np.zeros(shape=self.env.action_space.shape[0]),
						self.args.expl_noise * self.args.expl_noise * np.identity(noise.shape[0]))
					if 'SHRL' in self.args.policy_name:
						p = (p_noise * utils.softmax(self.policy.option_prob))[0]
					else:
						p = (p_noise * utils.softmax(self.policy.q_predict)[self.policy.option_val])[0]
			
			new_obs, reward, done, _ = self.env.step(action)
			# cum_obs.append(self.obs)
			# cum_new_obs.append(new_obs)
			# cum_action.append(action)
			# cum_option.append(self.option)
			# cum_reward.append(reward)
			
			self.cumulative_reward += reward
			self.episode_reward += reward
			auxiliary_reward = 0.
			
			done_bool = 0 if self.episode_timesteps + 1 == self.env._max_episode_steps else float(done)
			
			if 'HRLACOP' == self.args.policy_name:
				if self.total_timesteps <= self.args.start_timesteps:
					self.replay_buffer_low.add(
						(self.obs, new_obs, action, self.option, self.next_option, reward, auxiliary_reward, done_bool))
				else:
					option_data.append(
						(self.obs, new_obs, action, self.option, self.next_option, reward, auxiliary_reward, done_bool))
			if 'RNN' in self.args.policy_name:
				new_obs_vec = utils.fifo_data(np.copy(self.obs_vec), new_obs)
				self.replay_buffer.add((np.copy(self.obs_vec), new_obs_vec, action, reward, done_bool))
				self.obs_vec = utils.fifo_data(self.obs_vec, new_obs)
			elif 'HRL' in self.args.policy_name:
				self.replay_buffer.add((self.obs, new_obs, action, reward, done_bool, p))
			else:
				self.replay_buffer.add((self.obs, new_obs, action, reward, done_bool, 1.))
			
			self.obs = new_obs
			self.episode_timesteps += 1
			self.total_timesteps += 1
			self.timesteps_since_eval += 1
			self.timesteps_calc_Q_vale += 1
		
		# Final evaluation
		avg_reward = evaluate_policy(self.env, self.policy, self.args)
		self.evaluations.append(avg_reward)
		
		if self.best_reward < avg_reward:
			self.best_reward = avg_reward
			print("Best reward! Total T: %d Episode T: %d Reward: %f" %
				  (self.total_timesteps, self.episode_timesteps, avg_reward))
			self.policy.save(self.file_name, directory=self.log_dir)
		
		if self.args.save_all_policy:
			self.policy.save(self.file_name + str(int(self.args.max_timesteps)), directory=self.log_dir)
		
		np.save(self.log_dir + "/test_accuracy", self.evaluations)
		utils.write_table(self.log_dir + "/test_accuracy", np.asarray(self.evaluations))
		
		# save the replay buffer
		if self.args.save_data:
			self.replay_buffer_low.save_buffer(self.log_dir + "/buffer_data")
		if self.args.evaluate_Q_value:
			true_Q_value = cal_true_value(env=self.env, policy=self.policy,
										  replay_buffer=self.replay_buffer,
										  args=self.args)
			self.writer_test.add_scalar('Q_value', true_Q_value, self.total_timesteps)
			self.true_Q_vals.append(true_Q_value)
			utils.write_table(self.log_dir + "/estimate_Q_vals", np.asarray(self.estimate_Q_vals))
			utils.write_table(self.log_dir + "/true_Q_vals", np.asarray(self.true_Q_vals))
		self.env.reset()
Esempio n. 54
0
        center_labels = np.zeros((new_points.shape[0], 4))

        for _, semins in enumerate(sem_ins_labels):
            valid_ind = np.argwhere(ins_labels == semins)[:, 0]
            if semins == 0 or valid_ind.shape[
                    0] < 5:  # background classes and small groups
                continue

            x_min = np.min(new_points[valid_ind, 0])
            x_max = np.max(new_points[valid_ind, 0])
            y_min = np.min(new_points[valid_ind, 1])
            y_max = np.max(new_points[valid_ind, 1])
            z_min = np.min(new_points[valid_ind, 2])
            z_max = np.max(new_points[valid_ind, 2])

            center_point[0][0] = (x_min + x_max) / 2
            center_point[0][1] = (y_min + y_max) / 2
            center_point[0][2] = (z_min + z_max) / 2

            gaussians = multivariate_normal.pdf(new_points[valid_ind, 0:3],
                                                mean=center_point[0, :3],
                                                cov=covariance)
            gaussians = (gaussians - min(gaussians)) / (max(gaussians) -
                                                        min(gaussians))
            center_labels[valid_ind,
                          0] = gaussians  # first loc score for centerness
            center_labels[valid_ind, 1:4] = center_point[0, :3] - new_points[
                valid_ind, 0:3]  # last 3 for offset to center

        np.save(save_path, center_labels)
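A self-contained sketch of the centerness labelling above (illustrative sizes and covariance, not the original pipeline): score each instance point with an isotropic Gaussian around the bounding-box centre, min-max normalise to [0, 1], and keep the per-point offset to the centre.

import numpy as np
from scipy.stats import multivariate_normal

pts = np.random.rand(100, 3)                           # points of one instance
center = 0.5 * (pts.min(axis=0) + pts.max(axis=0))     # bounding-box centre
scores = multivariate_normal.pdf(pts, mean=center, cov=0.05 * np.eye(3))
scores = (scores - scores.min()) / (scores.max() - scores.min())
offsets = center - pts                                 # per-point offset to the centre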
 def likelihood(self, samples: np.ndarray) -> np.ndarray:
     return np.atleast_1d(
         multivariate_normal.pdf(samples.T, mean=self._mean, cov=self._covariance_full)
     )
    def __init__(self,
                 mean=np.array([0, 0]),
                 cov=0.05**2 * np.eye(2),
                 num_train=100,
                 num_test=100,
                 map_function=None,
                 rseed=None):
        """Generate a new dataset.

        The input data x for train and test samples will be drawn iid from the
        given Gaussian. Per default, the map function is the probability
        density of the given Gaussian: y = f(x) = p(x).

        Args:
            mean: The mean of the Gaussian.
            cov: The covariance of the Gaussian.
            num_train: Number of training samples.
            num_test: Number of test samples.
            map_function (optional): A function handle that receives input
                samples and maps them to output samples. If not specified, the
                density function will be used as map function.
            rseed (int): If ``None``, the current random state of numpy is used
                to generate the data. Otherwise, a new random state with the
                given seed is generated.
        """
        super().__init__()

        if rseed is None:
            rand = np.random
        else:
            rand = np.random.RandomState(rseed)

        n_x = mean.size
        assert (n_x == 2)  # Only required when using plotting functions.

        train_x = rand.multivariate_normal(mean, cov, size=num_train)
        test_x = rand.multivariate_normal(mean, cov, size=num_test)

        if map_function is None:
            # f(x) = p(x)
            map_function = lambda x : multivariate_normal.pdf(x, mean, cov). \
                reshape(-1, 1)

        train_y = map_function(train_x)
        test_y = map_function(test_x)

        # Specify internal data structure.
        self._data['classification'] = False
        self._data['sequence'] = False
        self._data['in_data'] = np.vstack([train_x, test_x])
        self._data['in_shape'] = [n_x]
        self._data['out_data'] = np.vstack([train_y, test_y])
        self._data['out_shape'] = [1]
        self._data['train_inds'] = np.arange(num_train)
        self._data['test_inds'] = np.arange(num_train, num_train + num_test)

        self._mean = mean
        self._cov = cov
        self._map = map_function
Esempio n. 57
0
    def condition(self, X_input=None, Xerr_input=None, X_dict=None):
        """Condition the model based on known values for some
        features.

        Parameters
        ----------
        X_input : array_like (optional), shape = (n_features, )
            An array of input values. Inputs set to NaN are not set, and
            become features to the resulting distribution. Order is
            preserved. Either an X array or an X dictionary is required
            (default=None).
        Xerr_input: array_like (optional), shape = (n_features, )
            Errors for input values. Indices not being used for
            conditioning should be set to 0.0. If None, no additional
            error is included in the conditioning (default=None).
        X_dict: dictionary (optional), shape = (n_features, )
            A dictionary containing label:float or label:tuple pairs.
            If labels correspond to floats, no additional error is
            included in the conditioning. If tuples, it is assumed that
            the structs have the form (X_value, X_err). If self.labels
            is None, an error will be thrown. Dictionary values will
            supersede any X_input or Xerr_input arrays. Either an X
            array or an X dictionary is required (default=None).

        Returns
        -------
        cond_xdgmm: XDGMM object
            n_features = self.n_features-(n_features_conditioned)
            n_components = self.n_components
        """
        if self.V is None or self.mu is None or self.weights is None:
            raise Exception("Model parameters not set.")

        if X_input is None and X_dict is None:
            raise Exception("X values or dictionary must be given.")

        if X_input is None and X_dict is not None and \
            self.labels is None:
            raise Exception("Labels array is required for " +
                            "dictionary option to be used.")

        if X_dict is not None:
            X = []
            Xerr = []
            for i in range(len(self.labels)):
                label = self.labels[i]
                if label in X_dict:
                    if isinstance(X_dict[label], float):
                        X.append(X_dict[label])
                        Xerr.append(0.0)
                    elif isinstance(X_dict[label], tuple):
                        X.append(X_dict[label][0])
                        Xerr.append(X_dict[label][1])
                else:
                    X.append(np.nan)
                    Xerr.append(0.0)
            X = np.array(X)
            Xerr = np.array(Xerr)

        else:
            X = X_input
            Xerr = Xerr_input

        new_mu = []
        new_V = []
        pk = []

        not_set_idx = np.nonzero(np.isnan(X))[0]
        set_idx = np.nonzero(~np.isnan(X))[0]
        x = X[set_idx]
        covars = np.copy(self.V)

        if Xerr is not None:
            for i in set_idx:
                covars[:, i, i] += Xerr[i]

        for i in range(self.n_components):
            a = []
            a_ind = []
            A = []
            b = []
            B = []
            C = []

            for j in range(len(self.mu[i])):
                if j in not_set_idx:
                    a.append(self.mu[i][j])
                else:
                    b.append(self.mu[i][j])

            for j in not_set_idx:
                tmp = []
                for k in not_set_idx:
                    tmp.append(covars[i][j, k])
                A.append(np.array(tmp))

                tmp = []
                for k in set_idx:
                    tmp.append(covars[i][j, k])
                C.append(np.array(tmp))

            for j in set_idx:
                tmp = []
                for k in set_idx:
                    tmp.append(covars[i][j, k])
                B.append(np.array(tmp))

            a = np.array(a)
            b = np.array(b)
            A = np.array(A)
            B = np.array(B)
            C = np.array(C)

            mu_cond = a + np.dot(C, np.dot(np.linalg.inv(B), (x - b)))
            V_cond = A - np.dot(C, np.dot(np.linalg.inv(B), C.T))

            new_mu.append(mu_cond)
            new_V.append(V_cond)

            pk.append(
                multivariate_normal.pdf(x, mean=b, cov=B, allow_singular=True))

        new_mu = np.array(new_mu)
        new_V = np.array(new_V)
        pk = np.array(pk).flatten()
        new_weights = self.weights * pk
        new_weights = new_weights / np.sum(new_weights)

        if self.labels is not None:
            new_labels = self.labels[not_set_idx]
        else:
            new_labels = None

        return XDGMM(n_components=self.n_components,
                     n_iter=self.n_iter,
                     method=self.method,
                     V=new_V,
                     mu=new_mu,
                     weights=new_weights,
                     labels=new_labels)
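The conditioning step above implements the standard Gaussian identities mu_cond = a + C B^(-1) (x - b) and V_cond = A - C B^(-1) C^T for each component. A self-contained sanity check on a single 2-D Gaussian (an assumption, not part of the XDGMM API):

import numpy as np

mu_full = np.array([1.0, 2.0])
V_full = np.array([[2.0, 0.8],
                   [0.8, 1.0]])
x2 = 3.0                                           # observed value of the second feature

A, B, C = V_full[0, 0], V_full[1, 1], V_full[0, 1]
mu_cond = mu_full[0] + C / B * (x2 - mu_full[1])   # 1.8
V_cond = A - C**2 / B                              # 1.36
print(mu_cond, V_cond)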
Esempio n. 58
0
gaussianGroup = getGaussians(dataPoints, clusters)
# totalLogLike = getLoglikelihood(gaussianGroup, dataPoints)

print(gaussianGroup)

while not stop:

    newClusters = [[], [], []]
    for point in dataPoints:

        cluster = []
        # re-calculate ric for each point
        probs = []
        total = 0
        for g in gaussianGroup:
            prob = g[2] * multivariate_normal.pdf(
                point, mean=g[0], cov=g[1], allow_singular=True)
            # prob = two_d_gaussian(g[0], g[1], g[2], point)
            total = total + prob
            probs.append(prob)

        for i in range(0, 3):
            value = probs[i]
            ric = value / total
            newClusters[i].append(ric)

    currentgaussianGroup = getGaussians(dataPoints, newClusters)
    print("current Gaussians")
    for gauss in currentgaussianGroup:
        print(gauss)

    check1 = True
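The loop above is the E-step of EM for a 3-component mixture: each responsibility is a weighted Gaussian density divided by the total over components. A compact, self-contained sketch of the same computation (illustrative data, not the original script):

import numpy as np
from scipy.stats import multivariate_normal

X = np.random.randn(200, 2)
means = [np.zeros(2), np.array([3.0, 3.0]), np.array([-3.0, 3.0])]
covs = [np.eye(2)] * 3
weights = np.array([0.5, 0.3, 0.2])

dens = np.column_stack([
    w * multivariate_normal.pdf(X, mean=m, cov=c, allow_singular=True)
    for w, m, c in zip(weights, means, covs)
])
resp = dens / dens.sum(axis=1, keepdims=True)   # each row sums to 1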
                Nk[k] = Cl[k].shape[0]
                Prob_K[k] = Nk[k] / (N-1+alpha)

            # new room?
            if np.random.rand() < alpha / (N-1+alpha):
                # Candidate!
                ## Compute accept rate
                ### Sample new param
                mu_sampled, cov_sampled = Base_distribution_sampling(dim=2, sample_mean=sample_mean)
                try:
                    LL_old = multivariate_normal.logpdf(xi,mean=mu_dict[Cl_i],cov=cov_dict[Cl_i])
                    LL_new = multivariate_normal.logpdf(xi,mean=mu_sampled, cov=cov_sampled)
                except:
                    # Fall back to a jittered covariance when the stored one is singular;
                    # keep the result in log space so it matches the try branch.
                    # print cov_dict[Cl_i], len(Cl[Cl_i])
                    # print cov_dict[Cl_i], np.linalg.det(cov_dict[Cl_i])
                    LL_old = multivariate_normal.logpdf(xi, mean=mu_dict[Cl_i], cov=cov_dict[Cl_i] + 1e-6*np.eye(dim))
                    LL_new = multivariate_normal.logpdf(xi, mean=mu_sampled, cov=cov_sampled)

                ### Compute accept rate
                LPr_old = Base_disctribution_pdf(sample_mean,mu_dict[Cl_i], cov_dict[Cl_i], dim)
                LPr_new = Base_disctribution_pdf(sample_mean,mu_sampled, cov_sampled, dim)

                L_joint_old = LL_old + LPr_old
                L_joint_new = LL_new + LPr_new
                accept_rate = np.exp(min(0, L_joint_new - L_joint_old ))

                ## Determine to accept
                if np.random.rand() < accept_rate:
                    # accept!
                    Z_est[idx] = K_est + 1
                    Cl[K_est+1] = np.array([xi])
Esempio n. 60
0
def P_nopiel(x):
    result = 0.0
    for i in range(16):
        result += nopiel_w[i] * multivariate_normal.pdf(
            x, mean=nopiel_mu[i], cov=nopiel_sigma[i])
    return result
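An equivalent log-space sketch (an assumption, reusing the same `nopiel_*` globals as above): summing 16 pdfs can underflow for points far from every component, while a logsumexp over per-component log densities stays finite.

from scipy.special import logsumexp

def log_P_nopiel(x):
    log_terms = [
        np.log(nopiel_w[i]) + multivariate_normal.logpdf(
            x, mean=nopiel_mu[i], cov=nopiel_sigma[i])
        for i in range(16)
    ]
    return logsumexp(log_terms)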