Example #1
    def log_likelihood(self):
        # mu = np.dot(self.Z, self.W.T)
        # return -0.5 * np.sum(((self.X - mu) * self.mask) ** 2 / self.sigmasq)

        # Compute the marginal likelihood, integrating out z
        mu_x = self.mean
        Sigma_x = self.W.dot(self.W.T) + np.diag(self.sigmasq)

        from scipy.stats import multivariate_normal
        if not np.all(self.mask):
            # Find the patterns of missing data
            missing_patterns = np.unique(self.mask, axis=0)

            # Evaluate the likelihood for each missing pattern
            lls = np.zeros(self.N)
            for pat in missing_patterns:
                inds = np.all(self.mask == pat, axis=1)
                lls[inds] = \
                    multivariate_normal(mu_x[pat], Sigma_x[np.ix_(pat, pat)])\
                    .logpdf(self.X[np.ix_(inds, pat)])

        else:
            lls = multivariate_normal(mu_x, Sigma_x).logpdf(self.X)

        return lls
Example #2
def cov_check():
    count_yes = 0
    count_no = 0
    cov_total = numpy.zeros((3,3))
    vector_total = []
    for i in range(10000):
        #vector = numpy.random.uniform(-1, 1, size=6)
        fi = numpy.zeros((3,3))
        fi[numpy.diag_indices(3)] = numpy.random.uniform(-10, 10, size=3)
        fi[numpy.tril_indices(3, -1)] += numpy.random.uniform(-10, 10, size=3)
        cov_mat = numpy.dot(fi, fi.T)
        try:
            multivariate_normal([1,1,1], cov=cov_mat)
            count_yes += 1
            cov_total += cov_mat
            vec = []
            vec.append(cov_total[0][0])
            vec.append(cov_total[1][1])
            vec.append(cov_total[2][2])
            vec.append(cov_total[0][1])
            vec.append(cov_total[0][2])
            vec.append(cov_total[1][2])
            vector_total.append(vec)
        except Exception:
            print("nope")
            print(cov_mat)
            count_no += 1
    print(count_yes, count_no)
    return cov_total, numpy.array(vector_total)
Example #3
def Gibbs_sample_slow_scipy_version(param_dict, non_inf=False):        
    '''Draw a sample from the Normal-Wishart model using the built-in scipy distributions (significantly slower).
    Based on Gelman et al. (2013, 3rd ed., chapter 4).
    Parameters
    ----------
    param_dict: python-dict
    dictionary with sufficient statistics and parameters from a multivariate data-set,
    obtained through the functions 'make_param_dict' and 'update_param_dict'.

    non_inf: Boolean.
    This model tends to weight the distance from the prior mean to the empirical mean heavily.
    If a non-informative prior is used, it is advisable to set this option; the parametrization
    then corresponds to the multivariate Jeffreys prior density.

    Output
    --------
    Returns a d-dimensional draw from the Normal-Wishart model. For multiple samples,
    use the function 'Gibbs_sampler' with one of the options:
    ('slow'): non_inf = False
    ('nonInfSlow'): non_inf = True
    '''
    if non_inf:
        Prec_m = sts.wishart(df = param_dict['n']-1., scale=param_dict['invS_m']).rvs()
        Sigma_m = np.linalg.inv(Prec_m)
        mu = sts.multivariate_normal(mean= param_dict['E_mu'], cov=(1./param_dict['n'])*Sigma_m).rvs()
        return sts.multivariate_normal(mean=mu, cov = Sigma_m).rvs()
        
        
    else:
                    
        Prec_m = sts.wishart(df = param_dict['up_v_0'], scale=param_dict['up_Prec_0']).rvs()
        Sigma_m = np.linalg.inv(Prec_m)
        mu = sts.multivariate_normal(mean= param_dict['up_mu_0'], cov=(1./param_dict['up_k_0'])*Sigma_m).rvs()
        return sts.multivariate_normal(mean=mu, cov = Sigma_m).rvs()
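
# A minimal usage sketch for the non-informative branch (not part of the original
# source): the keys 'n', 'E_mu', and 'invS_m' are inferred from the code above as
# the sample size, empirical mean, and inverse scatter matrix (Gelman et al., ch. 4).
import numpy as np
import scipy.stats as sts

data = np.random.randn(50, 3)
centered = data - data.mean(axis=0)
param_dict = {'n': data.shape[0],
              'E_mu': data.mean(axis=0),
              'invS_m': np.linalg.inv(centered.T.dot(centered))}
draw = Gibbs_sample_slow_scipy_version(param_dict, non_inf=True)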
Example #4
def gaussian_PSFs():
    '''Provide a simple set of PSFs and image for testing and example.

    Returns
    -------
    psfbase : np.array
        Three Gaussians with different width and covariance as PSF base
        functions.
    image, image2 : np.array
        Two images generated from the Gaussian PSFs with some added noise.
    '''
    x, y = np.mgrid[-1:1:.05, -1:1:.05]
    pos = np.empty(x.shape + (2,))
    pos[:, :, 0] = x
    pos[:, :, 1] = y
    psf1 = multivariate_normal([0, 0.], [[2.0, 0.3], [0.3, 0.5]]).pdf(pos)
    psf2 = multivariate_normal([0, 0.], [[1.0, 0.3], [0.3, 0.7]]).pdf(pos)
    psf3 = multivariate_normal([0, 0.], [[1.0, 0], [0, 1.]]).pdf(pos)
    psfbase = np.ma.dstack((psf1, psf2, psf3))
    # Make an image as a linear combination of PSFs plus some noise
    image = 1 * psf1 + 2 * psf2 + 3 * psf3
    image += 0.3 * np.random.rand(*image.shape)
    # Add a faint companion
    image += 0.1 * multivariate_normal([0, 0.05], [[0.2, 0.], [0., 0.05]]).pdf(pos)
    image2 = 2. * psf1 + 2.3 * psf2 + 2.6 * psf3
    image2 += 0.3 * np.random.rand(*image.shape)
    return psfbase, image, image2
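
# A quick consistency check (not part of the original example): recover the mixing
# coefficients of `image` from the PSF base by linear least squares.
import numpy as np

psfbase, image, image2 = gaussian_PSFs()
A = np.asarray(psfbase).reshape(-1, psfbase.shape[-1])
coef, _, _, _ = np.linalg.lstsq(A, image.ravel(), rcond=None)
print(coef)  # roughly [1, 2, 3], biased a little by the noise and faint companion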
Example #5
	def shock_create(self, h, shock_type, shock_index, shock_value, shock_dir, irf_intervals):
		""" Function creates shocks based on desired specification

		Parameters
		----------
		h : int
			How many steps ahead to forecast

		shock_type : None or str
			Type of shock; options include None, 'Cov' (simulate from covariance matrix), 'IRF' (impulse response shock)

		shock_index : int
			Which parameter to apply the shock to if using an IRF.

		shock_value : None or float
			If specified, applies a custom-sized impulse response shock.

		shock_dir : str
			Direction of the IRF shock. One of 'positive' or 'negative'.

		irf_intervals : Boolean
			Whether to have intervals for the IRF plot or not

		Returns
		----------
		An h-length list which contains np.ndarrays containing shocks for each variable
		"""		
		# Loop over the forecast period

		if shock_type is None:

			random = [np.zeros(self.ylen) for i in range(h)]

		elif shock_type == 'IRF':

			cov = self.create_cov(self.params)
			post = ss.multivariate_normal(np.zeros(self.ylen),cov)
			if irf_intervals is False:
				random = [np.zeros(self.ylen) for i in range(h)]
			else:
				random = [post.rvs() for i in range(h)]
				random[0] = np.zeros(self.ylen)

			if shock_value is None:
				if shock_dir=='positive':
					random[0][shock_index] = cov[shock_index,shock_index]**0.5
				elif shock_dir=='negative':
					random[0][shock_index] = -cov[shock_index,shock_index]**0.5
				else:
					raise ValueError("Unknown shock direction!")	
			else:
				random[0][shock_index] = shock_value		

		elif shock_type == 'Cov':
			
			cov = self.create_cov(self.params)
			post = ss.multivariate_normal(np.zeros(self.ylen),cov)
			random = [post.rvs() for i in range(h)]

		return random
Example #6
def importance_pool_sampling(args):
    # args = [i_particle, theta_t_1, w_t_1, sig_t_1, eps_t]
    i_particle = args[0]
    theta_t_1 = args[1]
    w_t_1 = args[2]
    sig_t_1 = args[3]
    eps_t = args[4]

    theta_star = weighted_sampling(theta_t_1, w_t_1)

    np.random.seed()
    # perturbed theta (Double check)
    theta_starstar = multivariate_normal(theta_star, sig_t_1).rvs(size=1)
    model_starstar = simz(theta_starstar)

    rho = distance(data, model_starstar)

    while rho > eps_t:
        theta_star = weighted_sampling(theta_t_1, w_t_1)
        theta_starstar = multivariate_normal(theta_star, sig_t_1).rvs(size=1)
        model_starstar = simz(theta_starstar)

        rho = distance(data, model_starstar)

    p_theta = pi_priors(theta_starstar)

    w_starstar = p_theta / np.sum(w_t_1 * better_multinorm(theta_starstar, theta_t_1, sig_t_1))

    pool_list = [int(i_particle)]  # np.int was removed from NumPy; use the builtin
    for i_p in range(n_params):
        pool_list.append(theta_starstar[i_p])
    pool_list.append(w_starstar)
    pool_list.append(rho)

    return pool_list
Example #7
def fit_gaussians(x_train_boxcox, y_train):
    """ Fit class-dependent multivariate gaussians on the training set.

    Parameters
    ----------
    x_train_boxcox : np.array [n_samples, n_features_trans]
        Transformed training features.
    y_train : np.array [n_samples]
        Training labels.

    Returns
    -------
    rv_pos : multivariate normal
        multivariate normal for melody class
    rv_neg : multivariate normal
        multivariate normal for non-melody class
    """
    pos_idx = np.where(y_train == 1)[0]
    mu_pos = np.mean(x_train_boxcox[pos_idx, :], axis=0)
    cov_pos = np.cov(x_train_boxcox[pos_idx, :], rowvar=0)

    neg_idx = np.where(y_train == 0)[0]
    mu_neg = np.mean(x_train_boxcox[neg_idx, :], axis=0)
    cov_neg = np.cov(x_train_boxcox[neg_idx, :], rowvar=0)
    rv_pos = multivariate_normal(mean=mu_pos, cov=cov_pos, allow_singular=True)
    rv_neg = multivariate_normal(mean=mu_neg, cov=cov_neg, allow_singular=True)
    return rv_pos, rv_neg
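
# A hedged usage sketch with synthetic stand-in data (real inputs would come from
# the Box-Cox transform mentioned in the docstring): classify by comparing the two
# class-conditional log-densities, assuming equal class priors.
import numpy as np

rng = np.random.default_rng(0)
x_train_boxcox = np.vstack([rng.normal(0, 1, (50, 4)), rng.normal(2, 1, (50, 4))])
y_train = np.array([0] * 50 + [1] * 50)
rv_pos, rv_neg = fit_gaussians(x_train_boxcox, y_train)
y_pred = (rv_pos.logpdf(x_train_boxcox) > rv_neg.logpdf(x_train_boxcox)).astype(int)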
Example #8
    def _init_params(self, X):
        init = self.init
        n_samples, n_features = X.shape
        n_components = self.n_components

        if (init == 'kmeans'):
            km = Kmeans(n_components)
            clusters, mean, cov = km.cluster(X)
            coef = sp.array([c.shape[0] / n_samples for c in clusters])
            comps = [multivariate_normal(mean[i], cov[i], allow_singular=True)
                     for i in range(n_components)]
        elif (init == 'rand'):
            coef = sp.absolute(sprand.randn(n_components))
            coef = coef / coef.sum()
            means = X[sprand.permutation(n_samples)[0: n_components]]
            clusters = [[] for i in range(n_components)]
            for x in X:
                idx = sp.argmin([spla.norm(x - mean) for mean in means])
                clusters[idx].append(x)

            comps = []
            for k in range(n_components):
                mean = means[k]
                cov = sp.cov(clusters[k], rowvar=0, ddof=0)
                comps.append(multivariate_normal(mean, cov, allow_singular=True))

        self.coef = coef
        self.comps = comps
Example #9
def make_joint_pdf(dataset):
    dataset_fours = dataset[dataset[:, -1] == 4]
    dataset_fours = dataset_fours[:, :-1]

    dataset_not_fours = dataset[dataset[:, -1] != 4]
    dataset_not_fours = dataset_not_fours[:, :-1]

    mean_fours = numpy.mean(dataset_fours, axis=0)
    cov_fours = numpy.cov(dataset_fours.T)

    mean_not_fours = numpy.mean(dataset_not_fours, axis=0)
    cov_not_fours = numpy.cov(dataset_not_fours.T)

    try:
        fours_mv = multivariate_normal(mean=mean_fours, cov=cov_fours)
    except Exception as e:
        print(mean_fours, cov_fours)
        print('fours')
        raise e

    try:
        not_fours_mv = multivariate_normal(
            mean=mean_not_fours, cov=cov_not_fours)
    except Exception as e:
        print(mean_not_fours, cov_not_fours)
        print('not fours')
        raise e
    return fours_mv, not_fours_mv
Example #10
def find_optimal_weights(N, cov_reduction, mean_spread, n):
    """ Solve a quadratic program to find optimal weights for N gaussians to minimize
    the integral squared difference between the mixture and a zero-mean unit gaussian,
    given target covariance reduction cov_reduction and distance between means
    mean_spread. Also returns the value of the integral squared difference for the
    optimal weights."""
    
    cov_i = np.eye(n)
    cov_i[0,0] = cov_reduction
    
    means = np.zeros((n,N))
    for i in range(N):
        means[0,i] = (i - (N-1)/2)*mean_spread
    
    H = np.zeros((N,N))
    for i in range(N):
        for j in range(N):
            H[i,j] = stats.multivariate_normal(means[:,j], 2*cov_i).pdf(means[:,i])
    
    f = np.zeros((N,1))
    for i in range(N):
        f[i,0] = stats.multivariate_normal(means[:,i], np.eye(n)+cov_i).pdf(np.zeros(n))
        
    c = -f.T    
    b = np.zeros((N,1))
    A = np.eye(N)
        
    # J_ISD = J_11 - 2*f.T*w + w^T*H*w, where the constant J_11 = int N(x; 0, I)^2 dx
    J11 = stats.multivariate_normal(np.zeros(n), 2 * np.eye(n)).pdf(np.zeros(n))

    def loss(w, sign=1.):
        return sign * (np.dot(w.T, np.dot(H, w)) + 2*np.dot(c, w) + J11)

    def jac(w, sign=1.):
        return sign * (2*np.dot(w.T, H) + 2*c)
    
    one_array = np.ones((N,1))
    cons = ({'type':'eq',
             'fun': lambda w: np.dot(one_array.T, w) - 1.,
             'jac': lambda w: one_array.T },)
    '''{'type':'ineq',
             'fun':lambda w: (np.dot(A,w) - b)[:,0],
             'jac':lambda w: A}'''
    
    bounds = [(0,1) for _ in range(N)]    
    
    opt = {'disp': False}
    
    res = optimize.minimize(loss, x0=np.ones(N)/N, jac=jac, bounds=bounds, 
                            constraints=cons, method='SLSQP', options=opt)
    w = res.x

    # Check results
    assert res.success, "quadratic program optimization failed"
    assert np.allclose(np.sum(w), 1.0), "Weights don't add to one" 
    assert np.all(w >= 0) and np.all(w <= 1), "Weights aren't in [0, 1]: " + str(w)
    assert loss(w).shape[0] == 1, "Weird things are happening" 
    
    return w, loss(w)[0]
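
# A hedged usage sketch (parameter values are illustrative only): approximate a 2D
# standard normal whose first coordinate's variance is reduced to 0.5 with N=3
# components spaced 1.0 apart along that coordinate.
w, isd = find_optimal_weights(N=3, cov_reduction=0.5, mean_spread=1.0, n=2)
print(w, isd)  # weights summing to one, plus the residual integral squared difference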
Example #11
	def __init__(self,mean0,cov0,mean1,cov1):
		""" Construct a mixture of two gaussians. mean0 is a 2x1 vector of means for class 0 and cov0 is the 2x2 covariance matrix for class 0.
				Similarly for class 1."""
		self.mean0 = mean0
		self.mean1 = mean1
		self.cov0 = cov0
		self.cov1 = cov1
		self.rv0 = multivariate_normal(mean0, cov0)
		self.rv1 = multivariate_normal(mean1, cov1)
Example #12
def target(p_i,p_f,t,size):
    prior = multivariate_normal(mu,sigma_1)
    likelihood = prior.logpdf(p_i.T) + prior.logpdf(p_f.T)

    q_i = projection(p_i)
    q_f = projection(p_f)
    for i in range(size):
        f = multivariate_normal(get_qs(q_i,q_f,t[i]),sigma)
        likelihood = likelihood + f.logpdf(r[i,:])
    return likelihood
Example #13
def animate(h):
  """
  Performs a step of EM, and returns the graphed result.
  """
  means = gmm.means_
  weights = gmm.weights_
  covars = gmm.covars_

  ### E-step

  # for each cluster
  for j in range(k):
    mvn = multivariate_normal(means[j], covars[j])
    l[:,j] = weights[j]*mvn.pdf(X) 
  # normalize likelihoods to create "responsibility"
  res = l / np.sum(l, axis=1)[:,None]
  
  ### M-step

  for j in range(k):
    jsum = np.sum(res[:,j])
    weights[j] = jsum / np.sum(res)

    rn = res[:,j]
    prod = np.dot(rn, X)
    means[j] = prod/jsum

    numer = np.zeros((d,d))
    for i in range(n):
      whitemean = X[i,:]-means[j]
      whitemean = whitemean[:,None]
      numer += res[i,j]*(whitemean*np.transpose(whitemean))

    covars[j] = numer / jsum

  ### Compute data log likelihood and EM bound
  Lnew = 0.0
  B = 0.0
  for i in range(n):
    lsum = 0.0
    bsum = 0.0

    for j in range(k):
      wpdf = weights[j]*multivariate_normal(means[j],covars[j]).pdf(X[i,:])
      lsum += wpdf
      bsum += res[i,j]*np.log(wpdf/res[i,j])

    B += bsum
    Lnew += np.log(lsum)

  gmm.means_ = means
  gmm.weights_ = weights
  gmm.covars_ = covars

  return vis_gmm_2d(gmm, k, X, ax, rmin, rmax, my_colors)
Example #14
 def getUniformTestData(self,n):
     """
         Given a LinearDataGenerator object, draw n data points from the same
         distribution (note: this implementation uses self.n and ignores the
         n argument).
     """
     data_dist  = stats.multivariate_normal(np.zeros(self.p),self.width*np.eye(self.p))
     noise_dist = stats.multivariate_normal(np.zeros(self.n),self.var*np.eye(self.n))
     X = np.array([data_dist.rvs() for i in range(self.n)])
     y = np.array([[X[i,:].dot(self.bg)[0] for i in range(self.n)]]).T \
          + np.array([noise_dist.rvs()]).T
     return X, y
Example #15
 def marginal(self, i1=None, i2=None, x=None):
     warn("The value of the marginal likelihood provided corresponds to "+
          "marginalising over an infinite parameter space. Note that the prior may "+
          "be positive only on a smaller region.")
     if i1 is None and i2 is None:
         return float(self.n_modes)
     elif i2 is None:
         m_gaussians = [multivariate_normal(g.mean[i1], [[g.cov[i1,i1]]]) for g in self.gaussians]
     else:
         m_gaussians = [multivariate_normal(
                          g.mean[[i1,i2]], [[g.cov[i1,i1],g.cov[i1,i2]],[g.cov[i2,i1],g.cov[i2,i2]]])
                        for g in self.gaussians]
     assert x is not None, "You must provide a point where to evaluate the marginal."
     return sum([g.pdf(x) for g in m_gaussians])
Example #16
def calc_posterior(means, data):
    prior = calc_prior(data)
    posterior = [[], []]
    mvn_pdf = [multivariate_normal(mean=means[0], cov=np.eye(2)), multivariate_normal(mean=means[1], cov=np.eye(2))]
    for i, mean in enumerate(means):
        posterior[i] = 1.0  # running product over points; starting at 0 would zero everything
        for point in data[i]:
            point_coordinates = point[:-1]
            likelihood = mvn_pdf[i].pdf(point_coordinates)
            numerator = likelihood * prior[i]
            normalizer = mvn_pdf[0].pdf(point_coordinates) * prior[0] + mvn_pdf[1].pdf(point_coordinates) * prior[1]
            posterior[i] *= (numerator / normalizer)
    return posterior
Example #17
    def __init__(self, landmark, gmm, classifier_loss, relation_name):
        super(TransferModel, self).__init__()
        self.gmm                = gmm
        self.landmark           = landmark
        self.classifier_loss    = classifier_loss
        self.positive           = multivariate_normal(gmm.means_[1], gmm.covars_[1])#, allow_singular=True)
        self.negative           = multivariate_normal(gmm.means_[0], gmm.covars_[0])#, allow_singular=True)

        if relation_name == 'near':
            self.scoring_function   = support_functions.distance_pose_xy
        elif relation_name == 'relative_angle':
            self.scoring_function   = support_functions.unit_circle_position_pose_xy
        else:
            raise rospy.ROSException("Unknown relation_name %s"%relation_name)
Example #18
def gaussian_mix(query):
    # Assign multivariate gaussians to be present in the space.
    gaussians = [multivariate_normal(mean=[0.9, 0.1], cov=[[0.05, 0], [0, 0.05]])]
    gaussians.append(multivariate_normal(mean=[0.9, 0.9], cov=[[0.07, 0.01], [0.01, 0.07]]))
    gaussians.append(multivariate_normal(mean=[0.15, 0.7], cov=[[0.03, 0], [0, 0.03]]))
    # Initialize initial value.
    value = 0.0
    # Iterate through each gaussian in the space.
    for j in range(len(gaussians)):
        value += gaussians[j].pdf(query)

    # Take the average.
    gaussian_function = value / len(gaussians)
    return vec(gaussian_function)  # vec(np.array([query.ravel(), gaussian_function]).ravel())
Example #19
def gmm(k, xs, tol=1e-6, max_iter=200):
    """Vectorized version of GMM. Faster than above but still rough."""
    
    n, p = xs.shape
    
    mus, z = initialization.kmeanspp(k, xs, ret='both')
    pis = np.array([len(np.where(z==i)[0])/n for i in np.unique(z)])
    sigmas = np.array([np.eye(p)]*k)

    ll_old = 0
    for i in range(max_iter):
        exp_A = []
        exp_B = []
        ll_new = 0

        # E-step, ws are responsibilities
        ws = np.zeros((k, n))
        for j in range(k):
            ws[j, :] = pis[j]*multivariate_normal(mus[j], sigmas[j]).pdf(xs)
        ws /= ws.sum(0)
            
        # M-step
        pis = ws.sum(axis=1)
        pis /= n

        mus = np.dot(ws, xs)
        mus /= ws.sum(1)[:, None]

        sigmas = np.zeros((k, p, p))
        for j in range(k):
            ys = xs - mus[j, :]
            # batched outer products; np.matmul replaces the removed
            # numpy.core.umath_tests.matrix_multiply helper
            sigmas[j] = (ws[j,:,None,None]*\
                       np.matmul(ys[:,:,None], ys[:,None,:])).sum(axis=0)
        sigmas /= ws.sum(axis=1)[:,None,None]

        # update complete log likelihood
        ll_new = 0
        for pi, mu, sigma in zip(pis, mus, sigmas):
            ll_new += pi*multivariate_normal(mu, sigma).pdf(xs)
        ll_new = np.log(ll_new).sum()

        # convergence test
        if np.abs(ll_new - ll_old) < tol:
            break
        ll_old = ll_new

    z = ws.T
    labels = np.argmax(z, axis=1)

    return labels
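
# A hedged usage sketch (assumes the `initialization.kmeanspp` helper imported by
# this module is available; the data values are illustrative):
import numpy as np

xs = np.vstack([np.random.randn(200, 2), np.random.randn(200, 2) + 4.0])
labels = gmm(2, xs)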
Example #20
def prob4():
    """Approximate the probability that a random draw from the
    multivariate standard normal distribution will be less than -1 in 
    the x-direction and greater than 1 in the y-direction.
    Returns: your estimated probability"""
    N = 500000
    # freeze the distributions once rather than re-creating them on every call
    F = stats.multivariate_normal(mean=[0, 0])
    G = stats.multivariate_normal(mean=[-1, 1])
    h = lambda x: x[0] < -1 and x[1] > 1
    f = lambda x: F.pdf(x)
    g = lambda x: G.pdf(x)
    X = np.random.multivariate_normal(mean=[-1, 1], cov=[[1, 0], [0, 1]], size=N)
    tot = 0
    for i in range(N):
        tot += (h(X[i])*f(X[i])/g(X[i]))
    return 1./N * tot
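
# A sanity check, not part of the original: the two coordinates are independent,
# so the exact probability is norm.cdf(-1) * norm.sf(1) = norm.cdf(-1)**2 ~ 0.0252.
from scipy import stats
print(stats.norm.cdf(-1) * stats.norm.sf(1))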
    
Example #21
 def __init__(self, mean, cov):
     self.mean = mean
     self.cov = np.atleast_2d(cov)  # np.matrix is deprecated
     self.dim = len(self.mean)
     assert self.cov.shape[0] == self.cov.shape[1]
     domain = ProductDomain([IntervalDomain(-np.inf, +np.inf) for i in range(len(self.mean))])
     super().__init__(stats.multivariate_normal(self.mean, self.cov), domain)
Example #22
 def learn(self, sequence, zeta=None, init=False, only_final_ll=False):
     ''' Runs Baum-Welch to train HMM '''
     K = self.num_states
     epsilon = 0.1
     old_ll = float("-inf")
     iterations = 0
     if init:            
         means, covs = self.get_kmeans_emission_init(sequence)
         self.emission_density_objs = [multivariate_normal(mean=means[k], cov=covs[k], allow_singular=True) for k in range(K)]            
         start_probs, transition_probs = self.get_random_start_and_trans_probs()
         self.start_probs = start_probs
         self.trans_probs = transition_probs
     while True:
         iterations += 1
         self.forward(sequence)
         self.backward(sequence)
         self.compute_gamma_table(sequence)
         self.compute_xi_table(sequence)
         if not only_final_ll:
             print("Log Likelihood:", self.log_likelihood)
         if abs(self.log_likelihood - old_ll) < epsilon or iterations > MAX_ITER:
             print("Final Log Likelihood:", self.log_likelihood)
             return
         else:
             old_ll = self.log_likelihood
         self.update_start_probs()
         self.trans_update_method(sequence, zeta)
         self.update_emission_parameters(sequence)
Example #23
    def predict_log_probs(self, test_data):
        num_points = test_data["coordinates"].shape[0]
        topic_log_prob_vector = np.zeros((self.num_topics, num_points))
        # topic_prob_vector = np.ones((self.num_topics, num_points))

        for z in range(self.num_topics):
            try:
                rv = stats.multivariate_normal(mean=self.topic_centers[z, :],
                                               cov=self.topic_covar[z, :, :], allow_singular=True)
                loc_log_prob = rv.logpdf(test_data["coordinates"]).reshape((num_points, 1))  # Nx1
                # loc_prob = rv.pdf(test_data["coordinates"]).reshape((num_points, 1))  # Nx1

            except Exception:
                print("Error while computing geo log probabilities for test, dumping data.", "\n",
                      self.topic_centers[z, :], "\n", self.topic_covar[z, :, :], file=sys.stderr)
                traceback.print_stack(file=sys.stderr)
                sys.exit(1)

            feature_log_prob = np.zeros((num_points, 1))
            # feature_prob = np.ones((num_points, 1))
            for feature in self.beta_arrays.keys():
                beta = self.beta_arrays[feature][z]
                num_words = beta.shape[0]
                feature_log_prob += test_data[feature] * np.log(beta.reshape((num_words, 1)))  # (NxV * Vx1) = Nx1
                # feature_prob *= np.prod(np.power(beta.reshape((num_words, 1)).T, test_data[feature].todense()), axis=1)

            topic_log_prob_vector[z] = (loc_log_prob +
                                        feature_log_prob +
                                        np.tile(np.log(self.theta[0, z]), (num_points, 1))).flatten()
            # topic_prob_vector[z] = np.multiply(np.multiply(loc_prob, feature_prob),
            #                                    np.tile(self.theta[0, z], (num_points, 1))).flatten()

        # print("direct")
        # print(np.sum(np.log(np.sum(topic_prob_vector, axis=0))))
        return np.sum(utils.log_sum(topic_log_prob_vector, axis=0))
Example #24
def _do_plot_test():

    from numpy.random import multivariate_normal
    p = np.array([[32, 15],[15., 40.]])

    x,y = multivariate_normal(mean=(0,0), cov=p, size=5000).T
    sd = 2
    a,w,h = covariance_ellipse(p,sd)
    print(np.degrees(a), w, h)

    count = 0
    color=[]
    for i in range(len(x)):
        if _is_inside_ellipse(x[i], y[i], 0, 0, a, w, h):
            color.append('b')
            count += 1
        else:
            color.append('r')
    plt.scatter(x,y,alpha=0.2, c=color)


    plt.axis('equal')

    plot_covariance_ellipse(mean=(0., 0.),
                            cov = p,
                            std=sd,
                            facecolor='none')

    print(count / len(x))
Example #25
def random_algorithm(cmd, world):
	randx = world.xdim*np.random.random_sample()
	randy = world.ydim*np.random.random_sample()

	mv_dist = multivariate_normal(np.array([randx, randy]), np.array([[1, 0], [0, 1]]))

	return mv_dist
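
# A hedged usage sketch; `StubWorld` stands in for the world object the function
# expects (only the xdim/ydim attributes are assumed here).
import numpy as np
from scipy.stats import multivariate_normal

class StubWorld:
    xdim, ydim = 10.0, 10.0

dist = random_algorithm(cmd=None, world=StubWorld)
print(dist.mean)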
Example #26
def prob4():
    """Approximate the probability that a random draw from the
    multivariate standard normal distribution will be less than -1 in 
    the x-direction and greater than 1 in the y-direction."""
    h = lambda y: y[0] < -1 and y[1] > 1
    f = lambda y: stats.multivariate_normal(np.zeros(2), np.eye(2)).pdf(y)
    g = lambda y: stats.multivariate_normal(np.array([-1, 1]), np.eye(2)).pdf(y)

    n = 10 ** 4
    Y = np.random.multivariate_normal(np.array([-1, 1]), np.eye(2), size=n)
    hh = np.apply_along_axis(h, 1, Y)
    ff = np.apply_along_axis(f, 1, Y)
    gg = np.apply_along_axis(g, 1, Y)
    approx = 1.0 / n * np.sum(hh * ff / gg)

    return approx
Example #27
    def __init__(self, cov=None, random_state=None):

        super(GaussianProposal, self).__init__(random_state=random_state)

        self._cov = cov
        # TODO: Use the given random state instead!
        self._norm = stat.multivariate_normal(cov=cov)
Example #28
def objects_walls_algorithm(cmd, world, k1=4.2, k2=4.4):
	x, y = np.mgrid[0:world.xdim:.1, 0:world.ydim:.1]

	# Calculate naive distribution
	naive_dist = naive_algorithm(cmd, world)
	naive_vals = naive_dist.pdf(np.dstack((x, y)))

	# Find Distance to closest object
	ref_dists = {ref : np.sqrt((x - ref.center[0])**2 + (y - ref.center[1])**2) for ref in world.references}
	min_ref_dists = np.min(np.dstack([ref_dists[ref] for ref in ref_dists]), axis=2)

	# Difference between distance to closest object and object reference in command
	ref_distance_diff = ref_dists[cmd.reference] - min_ref_dists

	ref_distance_vals = expon.pdf(ref_distance_diff, scale=k1)

	# Find distance to nearest wall
	min_wall_dists = np.min(np.dstack((x, y, world.xdim - x, world.ydim - y)), axis=2)

	# Difference between distance to closest wall and object reference in command
	wall_distance_diff = ref_dists[cmd.reference] - min_wall_dists
	wall_distance_diff[wall_distance_diff < 0] = 0

	wall_distance_vals = expon.pdf(wall_distance_diff, scale=k2)

	mean_prob = naive_vals*ref_distance_vals*wall_distance_vals
	loc = np.where(mean_prob == mean_prob.max())
	mean = 0.1*np.array([loc[0][0], loc[1][0]])

	mv_dist = multivariate_normal(mean, naive_dist.cov)

	return mv_dist
Example #29
def generate_2d_shower_model(centroid, width, length, psi):
    """Create a statistical model (2D gaussian) for a shower image in a
    camera. The model's PDF (`model.pdf`) can be passed to
    `make_toymodel_shower_image`.

    Parameters
    ----------
    centroid : (float,float)
        position of the centroid of the shower in camera coordinates
    width : float
        width of shower (minor axis)
    length : float
        length of shower (major axis)
    psi : convertible to `astropy.coordinates.Angle`
        rotation angle about the centroid (0=x-axis)

    Returns
    -------
    a frozen `scipy.stats.multivariate_normal` distribution
    """
    aligned_covariance = np.array([[length**2, 0], [0, width**2]])
    # rotate by psi angle: C' = R C R+
    rotation = linalg.rotation_matrix_2d(psi)
    rotated_covariance = rotation.dot(aligned_covariance).dot(rotation.T)
    return multivariate_normal(mean=centroid, cov=rotated_covariance)
Example #30
def cheating_algorithm(pts):
	mean = np.mean(pts, axis=0)
	covariance = np.cov(pts.T)

	mv_dist = multivariate_normal(mean, covariance)

	return mv_dist
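
# A minimal usage sketch with synthetic points (values illustrative): the fitted
# distribution should recover the generating mean and covariance.
import numpy as np

pts = np.random.multivariate_normal([1.0, 2.0], [[1.0, 0.3], [0.3, 0.5]], size=500)
dist = cheating_algorithm(pts)
print(dist.mean, dist.cov)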
Example #31
# methods to generate the three-dimensional distribution, as follows,

mean = [-100, 50, 0.15]  # given as [isotropic chemical shift in ppm, zeta in ppm, eta].
covariance = [[3.25, 0, 0], [0, 26.2, 0], [0, 0, 0.002]]  # same order as the mean.

# range of coordinates along the three dimensions
iso_range = np.arange(100) * 0.3055 - 115  # in ppm
zeta_range = np.arange(30) * 2.5 + 10  # in ppm
eta_range = np.arange(21) / 20

# The coordinates grid
iso, zeta, eta = np.meshgrid(iso_range, zeta_range, eta_range, indexing="ij")
pos = np.asarray([iso, zeta, eta]).T

# Three-dimensional probability distribution function.
pdf = multivariate_normal(mean=mean, cov=covariance).pdf(pos).T

# %%
# Here, ``iso``, ``zeta``, and ``eta`` are the isotropic chemical shift, nuclear
# shielding anisotropy, and nuclear shielding asymmetry coordinates of the 3D-grid
# system over which the multivariate normal probability distribution is evaluated. The
# mean of the distribution is given by the variable ``mean`` and holds a value of -100
# ppm, 50 ppm, and 0.15 for the isotropic chemical shift, nuclear shielding anisotropy,
# and nuclear shielding asymmetry parameter, respectively. Similarly, the variable
# ``covariance`` holds the covariance matrix of the multivariate normal distribution.
# The two-dimensional projections from this three-dimensional distribution are shown
# below.
_, ax = plt.subplots(1, 3, figsize=(9, 3))

# isotropic shift vs. shielding anisotropy
ax[0].contourf(zeta_range, iso_range, pdf.sum(axis=2))
Example #32
            log_data = g.read().strip().split("\n")
            g.close()

            # If last line starts with Best, get line before
            flag = True
            i = -1
            while flag:
                if log_data[i].startswith("Best"):
                    variance = float(log_data[i - 1].strip().split()[4]) / 100
                    flag = False
                else:
                    i -= 1

            seed = row["seed"]

            random_variable = multivariate_normal(
                mean=mean_array, cov=np.identity(len(mean_array)) * variance)
            team = random_variable.rvs(2, seed)

            agent_1 = NNAgent(fitness_calculator.get_observation_size(),
                              fitness_calculator.get_action_size(),
                              parameter_filename, team[0])
            agent_2 = NNAgent(fitness_calculator.get_observation_size(),
                              fitness_calculator.get_action_size(),
                              parameter_filename, team[1])
            results = fitness_calculator.calculate_fitness(
                agent_list=[agent_1, agent_2],
                render=False,
                time_delay=0,
                measure_specialisation=True,
                logging=False,
                logfilename=None,
Example #33
def MCMC_multivariate_ssm(test_data,
                          causal_period,
                          nseasons=12,
                          iterloop=1000,
                          burnin=100,
                          stationary=True,
                          graph=False,
                          graph_structure=None,
                          seed=3):

    ############## load functions ###############
    if stationary:
        from stationaryRestrict import stationaryRestrict

    ############### organize dataset #################
    length, d = test_data.shape
    # separate causal period dataset
    causal_period_len = causal_period.shape[0]
    length_non_causal = length - causal_period_len

    ############### initialize parameters #################
    # initialize z
    circle = np.min((nseasons, length))
    n = (circle + 1) * d
    z = np.zeros((n, d))
    z[:d, :] = np.eye(d)
    z[2 * d:3 * d, :] = np.eye(d)

    # initialize mu: intercept of hidden equation
    mu_ss = np.zeros(n)

    # initialize alpha: (see Durbin and Koopman, 2002)
    alpha = np.zeros(n, dtype=np.int32)
    aStar = np.zeros(n, dtype=np.int32)
    # initialize variance of alpha
    P = np.zeros((n, n), dtype=np.int32)
    P[:3 * d, :3 * d] = np.eye(3 * d) * 1e6
    if stationary:
        P[d:2 * d, d:2 * d] = np.eye(d)

    # initialize sigma
    delta = d + 1  # prior for sigma
    B = np.eye(d)  # prior for sigma
    # give the right parameters to wishart
    sigma_hat_inv = wishart(
        df=d + 1, scale=np.eye(d) / .1**2 / d).rvs() * graph_structure

    # what's happening here
    sigma_hat = chol2inv(sigma_hat_inv)

    # initialize transition matrix
    trans = np.zeros((n, n))
    linear = np.eye(2 * d)
    linear[:d, d:2 * d] = np.eye(d)
    trans[:2 * d, :2 * d] = linear
    # take initial variance of tau from the data
    if stationary:
        # find python library with autoregressive models
        var = VAR(test_data[causal_period, :])
        data_phi = var.fit(maxlags=1, ic=None, trend="nc").params
        trans[d:2 * d, d:2 * d] = data_phi
    else:
        trans[d:2 * d, d:2 * d] = np.eye(d)

    seasonal = np.zeros((circle - 1, circle - 1))
    seasonal[0, :] = -1
    seasonal[1:circle - 1, :circle - 2] = np.eye(circle - 2)
    for dims in range(d):
        trans[2 * d + dims:n:d, 2 * d + dims:n:d] = seasonal

    # initialize R
    R = np.zeros((n, d * 3))
    R[:3 * d, :3 * d] = np.eye(3 * d)

    # initialize covariance matrix Q = bdiag(sigmaU, sigmaV, sigmaW)
    k1, k2, k3 = 0.1, 0.1, 0.1
    # what's happening here
    if not graph:
        sigmaU = chol2inv(wishart(d, k1**2 * d * np.eye(d)).rvs())
        sigmaV_inv = wishart(d, k2**2 * d * np.eye(d)).rvs()
        sigmaV = chol2inv(sigmaV_inv)
        sigmaW = chol2inv(wishart(d, k3**2 * d * np.eye(d)).rvs())
    else:
        sigmaU = chol2inv(
            wishart(df=d + 1, scale=np.eye(d) / k1**2 / d).rvs() *
            graph_structure)
        sigmaV_inv = wishart(
            df=d + 1, scale=np.eye(d) / k1**2 / d).rvs() * graph_structure
        sigmaV = chol2inv(sigmaV_inv)
        sigmaW = chol2inv(
            wishart(df=d + 1, scale=np.eye(d) / k1**2 / d).rvs() *
            graph_structure)

    Q = sla.block_diag(sigmaU, sigmaV, sigmaW)

    ################### Prepare for MCMC Sampling ##################
    # create matrix to store parameter draws
    mu_sample = np.empty((length, d, iterloop))
    a_last_sample = np.empty((n, iterloop))
    P_last_sample = np.empty((n, n, iterloop))
    prediction_sample = np.empty((length, d, iterloop))
    sigma_sample = np.empty((d, d, iterloop))
    sigma_U_sample = np.empty((d, d, iterloop))
    sigma_V_sample = np.empty((d, d, iterloop))
    sigma_W_sample = np.empty((d, d, iterloop))
    if stationary:
        Theta_sample = np.empty((d, d, iterloop))
        D_sample = np.zeros((d, iterloop))

    # pb  = txtProgressBar(1, iterloop, style=3)    # report progress
    # print("\nStarting MCMC sampling: \n")     # report progress
    ##################### Begin MCMC Sampling #######################
    # ptm = proc.time()
    for itery in tqdm(range(iterloop), desc="MCMC sampling"):
        ## --------------------------------------- ##
        ## Step 1. obtain draws of alpha, apply Koopman's filter (2002)
        # simulate w.hat, y.hat, alpha.hat for Koopman's filter (2002)
        alpha_plus = np.zeros((length, n))
        for t in range(length):
            eta = multivariate_normal(mean=np.zeros(3 * d), cov=Q,
                                      seed=seed).rvs()
            if t == 0:
                alpha_plus[t, :] = mu_ss + trans.dot(alpha) + R.dot(eta)
            else:
                alpha_plus[t, :] = mu_ss + trans.dot(
                    alpha_plus[t - 1, :]) + R.dot(eta)

        test_est_plus = alpha_plus.dot(z) + multivariate_normal(
            mean=np.zeros(d), cov=sigma_hat, seed=seed).rvs(size=length)
        test_est_star = test_data - test_est_plus
        # Estimate alpha parameters
        sample_alpha_draws = koopmanfilter(n, test_est_star, trans, z, aStar,
                                           2 * P, 2 * sigma_hat, 2 * Q, R,
                                           causal_period)
        alpha_star_hat = sample_alpha_draws["alpha sample"]
        alpha_draws = alpha_star_hat + alpha_plus

        # collect a.last and P.last,
        # use them for starting point of koopman filter for causal period dataset
        # print(a_last_sample.shape, sample_alpha_draws["a last"].shape)
        a_last_sample[:, itery] = sample_alpha_draws["a last"]
        P_last_sample[:, :, itery] = sample_alpha_draws["P last"]

        ## ---------------------------------------- ##
        ## Step 2: make stationary restriction
        if stationary:
            alpha_draws_tau = alpha_draws[:length_non_causal, d:d * 2]
            if itery == 0:
                alpha_draws_tau_demean = alpha_draws_tau
                Theta_draw = stationaryRestrict(alpha_draws_tau_demean, sigmaV,
                                                sigmaV_inv)
            else:
                alpha_draws_tau_demean = (alpha_draws_tau.T -
                                          D_draw.reshape(-1, 1)).T
                Theta_draw = stationaryRestrict(alpha_draws_tau_demean,
                                                sigmaV_draws, sigmaV_inv)
            trans[d:2 * d, d:2 * d] = Theta_draw

            ## ---------------------------------------- ##
            ## Step 3: sample intercept mu.D, denote N(0, I) prior for D
            tau_part_A = alpha_draws_tau[1:length_non_causal, :] \
                         - alpha_draws_tau[:length_non_causal-1, :].dot(Theta_draw.T)
            tau_part_B = np.eye(d) - Theta_draw
            D_var = la.inv((length_non_causal - 1) *
                           tau_part_B.T.dot(sigmaV_inv).dot(tau_part_B) +
                           np.eye(d))
            D_var = check_positive(D_var)
            D_mean = D_var.dot((tau_part_B.T.dot(sigmaV_inv)).dot(
                tau_part_A.sum(axis=0)))  #.reshape(-1, 1)))  #it's wrong
            D_draw = multivariate_normal(mean=D_mean, cov=D_var,
                                         seed=seed).rvs()
            # update the mean: D - theta * D
            D_mu = tau_part_B.dot(D_draw)
            # print(D_mu.shape, mu_ss.shape)
            mu_ss[d:2 * d] = D_mu
            # update alpha_draws_tau_demean
            alpha_draws_tau_demean = (alpha_draws_tau.T -
                                      D_draw.reshape(-1, 1)).T

        ## ---------------------------------------- ##
        ## Step 4: update sigmaU, sigmaV, sigmaW
        # parameter in sigmaU
        PhiU_value = alpha_draws[1:length_non_causal, :d] \
                     - alpha_draws[:length_non_causal-1, :d] \
                     - alpha_draws[:length_non_causal-1, d:d*2]

        PhiU = PhiU_value.T.dot(PhiU_value)
        # parameter in sigmaV
        if stationary:
            PhiV_value = alpha_draws_tau_demean[1:length_non_causal, :] \
                         - alpha_draws_tau_demean[:length_non_causal-1, :].dot(Theta_draw.T)
            PhiV = PhiV_value.T.dot(PhiV_value)
        else:
            PhiV_value = alpha_draws[1:length_non_causal, d:2*d] \
                         - alpha_draws[:length_non_causal-1, d:2*d]
            PhiV = PhiV_value.T.dot(PhiV_value)
        # parameter in sigmaW
        bind_W = np.zeros((causal_period[0] - 1, 0))
        for dims in range(d):
            bind_W_tmp = np.hstack(
                (alpha_draws[1:length_non_causal, d * 2 + dims:n:d],
                 alpha_draws[:length_non_causal - 1, n - d + dims, None]))
            bind_W = np.hstack((bind_W, bind_W_tmp.sum(axis=1).reshape(-1, 1)))

        PhiW = bind_W.T.dot(bind_W)

        scale_U = PhiU + (d + 1) * k1**2 * np.eye(d)
        scale_V = PhiV + (d + 1) * k2**2 * np.eye(d)
        scale_W = PhiW + (d + 1) * k3**2 * np.eye(d)

        # start from here
        # sample sigmaU, sigmaV, sigma W from their posteriors
        if not graph:
            sigmaU_draws = la.inv(
                wishart(length_non_causal + d - 1, scale_U).rvs())
            sigmaV_inv = wishart(length_non_causal + d - 1, scale_V).rvs()
            sigmaV_draws = la.inv(sigmaV_inv)
            sigmaW_draws = la.inv(
                wishart(length_non_causal + d - 1, scale_W).rvs())
        else:
            sigmaU_inv = wishart(length_non_causal + d - 1,
                                 la.inv(scale_U)).rvs() * graph_structure
            sigmaU_draws = la.inv(check_positive(sigmaU_inv))
            sigmaV_inv = wishart(length_non_causal + d - 1,
                                 la.inv(scale_V)).rvs() * graph_structure
            sigmaV_draws = la.inv(check_positive(sigmaV_inv))
            sigmaW_inv = wishart(length_non_causal + d - 1,
                                 la.inv(scale_W)).rvs() * graph_structure
            sigmaW_draws = la.inv(check_positive(sigmaW_inv))

        Q = sla.block_diag(sigmaU_draws, sigmaV_draws, sigmaW_draws)

        ## ---------------------------------------- ##
        ## Step 5: update sigma_hat
        res = (test_data - alpha_draws.dot(z))[:length_non_causal, :]
        if not graph:
            D_sigma = res.T.dot(res) + B
            sigma_hat_inv = wishart(delta + length_non_causal, D_sigma).rvs()
            sigma_hat = la.inv(check_positive(sigma_hat_inv))
        else:
            D_sigma = res.T.dot(res) + B
            sigma_hat_inv = wishart(delta + length_non_causal,
                                    la.inv(D_sigma)).rvs() * graph_structure
            sigma_hat = la.inv(check_positive(sigma_hat_inv))

        ## ---------------------------------------- ##
        ## Step 6: estimating dataset using predicted value
        prediction_sample[:, :,
                          itery] = alpha_draws.dot(z) + multivariate_normal(
                              mean=np.zeros(d), cov=sigma_hat, seed=seed).rvs(
                                  alpha_draws.shape[0])
        ## ---------------------------------------- ##
        ## Step 7: collect sample draws
        mu_sample[:, :, itery] = alpha_draws[:length, :d]
        if stationary:
            Theta_sample[:, :, itery] = Theta_draw
            D_sample[:, itery] = D_draw
        sigma_sample[:, :, itery] = sigma_hat
        sigma_U_sample[:, :, itery] = sigmaU_draws
        sigma_V_sample[:, :, itery] = sigmaV_draws
        sigma_W_sample[:, :, itery] = sigmaW_draws

    # return result
    if stationary:
        return_dict = {
            "prediction sample": prediction_sample,
            "mu sample": mu_sample,
            "Theta sample": Theta_sample,
            "D sample": D_sample,
            "sigma sample": sigma_sample,
            "sigma U sample": sigma_U_sample,
            "sigma V sample": sigma_V_sample,
            "sigma W sample": sigma_W_sample,
            "a last sample": a_last_sample,
            "P last sample": P_last_sample,
            "z": z,
            "R": R,
            "trans": trans
        }
    else:
        return_dict = {
            "prediction sample": prediction_sample,
            "mu sample": mu_sample,
            "sigma sample": sigma_sample,
            "sigma U sample": sigma_U_sample,
            "sigma V sample": sigma_V_sample,
            "sigma W sample": sigma_W_sample,
            "a last sample": a_last_sample,
            "P last sample": P_last_sample,
            "z": z,
            "R": R,
            "trans": trans
        }

    return return_dict
Example #34
# ---------------------------------

# The range of isotropic chemical shifts, the quadrupolar coupling constant, and
# asymmetry parameters used in generating a 3D grid.
iso_r = np.arange(101) / 1.5 + 30  # in ppm
Cq_r = np.arange(100) / 4  # in MHz
eta_r = np.arange(10) / 9

# The 3D mesh grid over which the distribution amplitudes are evaluated.
iso, Cq, eta = np.meshgrid(iso_r, Cq_r, eta_r, indexing="ij")

# The 2D amplitude grid of Cq and eta is sampled from the Czjzek distribution model.
Cq_dist, e_dist, amp = CzjzekDistribution(sigma=1).pdf(pos=[Cq_r, eta_r])

# The 1D amplitude grid of isotropic chemical shifts is sampled from a Gaussian model.
iso_amp = multivariate_normal(mean=58, cov=[4]).pdf(iso_r)

# The 3D amplitude grid is generated as an uncorrelated joint distribution of the
# above two distributions, i.e., their product.
pdf = np.repeat(amp, iso_r.size).reshape(eta_r.size, Cq_r.size, iso_r.size)
pdf *= iso_amp
pdf = pdf.T

# %%
# The two-dimensional projections from this three-dimensional distribution are shown
# below.
_, ax = plt.subplots(1, 3, figsize=(9, 3))

# isotropic shift vs. quadrupolar coupling constant
ax[0].contourf(Cq_r, iso_r, pdf.sum(axis=2))
ax[0].set_xlabel("Cq / MHz")
Example #35

def generate_random_number_pair():
    return np.random.multivariate_normal([0.8, 0.8],
                                         [[0.1, -0.1], [-0.1, 0.12]])


random_pair = generate_random_number_pair()
print(random_pair)

# Exercise 3.1.3

x, y = np.mgrid[-0.25:2.25:.01, -1:2:.01]
pos = np.empty(x.shape + (2, ))
pos[:, :, 0] = x
pos[:, :, 1] = y
mu_p = [0.8, 0.8]
cov_p = [[0.1, -0.1], [-0.1, 0.12]]
z = multivariate_normal(mu_p, cov_p).pdf(pos)

fig = plt.figure(figsize=(10, 10), dpi=300)
ax = fig.add_subplot(projection='3d')  # fig.gca(projection=...) was removed in Matplotlib 3.6
ax.plot_surface(x, y, z)
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
ax.set_zlabel('$p(x_1, x_2 | x_3=0, x_4=0)$')
plt.savefig('3_1_3.png', bbox_inches='tight', dpi=300)
plt.show()

# Exercise 3.2.1

number_of_datapoints = 1000

covariance_3_2 = np.array([[2.0, 0.8], [0.8, 4.0]])
Example #36
 def d_prior(self, x_old, x_new):
     """ X_t | X_t-1 """
     return stats.multivariate_normal(self.model_mu,self.model_sigma).pdf(x_new-x_old)
Example #37
                default=False)
ap.add_argument("--chi2bins", dest="NCHI2BINS", type=int, default=50)
ap.add_argument("--databins", dest="NDATABINS", type=int, default=20)
ap.add_argument("--resample-data",
                dest="RESAMPLE_DATA",
                action="store_true",
                default=False)
args = ap.parse_args()

## Make perfect pseudodata and correlations
xedges, xmids, datavals, dataerrs, datacov = mkData(args.NDATABINS,
                                                    args.CORRMODE)
np.random.seed(12345)

## Smear once, from the perfect distribution and with the true covariance
mn1 = st.multivariate_normal(datavals, datacov)
if args.RESAMPLE_DATA:
    datavals_smear1 = mn1.rvs(size=args.NITERS)
else:
    datavals_smear1 = np.tile(mn1.rvs(size=1), (args.NITERS, 1))
#print(datavals_smear1)

## Fit nominal
chi2s, chi2s_fit, fitdata = [], [], []
invcov = np.linalg.inv(datacov) if args.CORRCHI2 else np.diag(
    np.reciprocal(dataerrs**2))
chi2s.append(chi2(datavals, datavals_smear1[0], invcov))
optres1 = opt.minimize(
    lambda ps: chi2FromParams(datavals_smear1[0], xmids, ps, invcov),
    [5., 10.])
chi2s_fit.append(optres1.fun)
Example #38
S = 0.05

# generate the data
sigma = 500
M = 200  # number of data points


def gen_mean():
    x = np.random.randint(XMIN, XMAX)
    y = np.random.randint(YMIN, YMAX)
    return (x, y)


mean = gen_mean()
cov = np.eye(2) * sigma
Gaussian = multivariate_normal(mean=mean, cov=cov)
X, Y = np.meshgrid(np.linspace(XMIN, XMAX, M), np.linspace(YMIN, YMAX, M))
pos = np.dstack([X, Y])
temperature_fictive = Gaussian.pdf(pos) * 1000000

# define function


def f(x, y):
    return -1 * temperature_fictive[y, x]


# init
x0 = np.random.randint(XMIN, XMAX)
y0 = np.random.randint(YMIN, YMAX)
T = T_init
Example #39
import scipy.stats as ss
import scipy.linalg as sl
import scipy.optimize as so
import matplotlib.pyplot as plt

np.random.seed(0)

# generate data
mu_0 = [-5, 1]  # data for class 0
cov_0 = 1.5**2 * np.eye(2)

mu_1 = [1, 5]  # data for class 1
cov_1 = np.eye(2)

N = 30  # number of sample points per class
rv0 = ss.multivariate_normal(mean=mu_0, cov=cov_0)
rv1 = ss.multivariate_normal(mean=mu_1, cov=cov_1)

X = np.vstack((rv0.rvs(N), rv1.rvs(N)))
y = np.concatenate((np.zeros(N), np.ones(N)))

isZero = y == 0
points_0 = X[isZero]
points_1 = X[~isZero]

# plot the points
fig = plt.figure(figsize=(11, 10))
fig.canvas.manager.set_window_title('logregLaplaceGirolamiDemo_Part_1')  # set_window_title moved to canvas.manager

plt.subplot(221)
plt.axis([-10, 5, -8, 8])
Example #40
def kernDist(index, center):
    distr = ss.multivariate_normal([0, 0], [[15, 0], [0, 15]])
    return distr.pdf([index[0] - center[0], index[1] - center[1]])
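
# Usage sketch, not from the original: the kernel weight of a pixel two units away
# from a kernel centered at (10, 10).
import scipy.stats as ss
print(kernDist((12, 10), (10, 10)))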
Example #41
 def r_prop(self, size, x_old, y):
     r = stats.multivariate_normal(x_old + self.filter_mu, self.filter_sigma).rvs(size)
     return r
Example #42
 def d_prop(self, x_old, x_new, y):
     return stats.multivariate_normal(self.filter_mu, self.filter_sigma).pdf(x_new-x_old)
Example #43
 def r_init_prop(self, size, y):
     return stats.multivariate_normal(self.filter_mu, self.filter_sigma).rvs(size).reshape(self.N, self.x_dim)
Example #44
def detect_keypoints(imagename, threshold):
    # SIFT Detector 
    #--------------
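    # NOTE: scipy.ndimage.imread and scipy.misc.imresize were removed in SciPy >= 1.2;
    # on modern installs, substitute imageio.imread and PIL/skimage resizing below.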
     
    original = ndimage.imread(imagename, flatten=True)

    # SIFT Parameters
    s = 3
    k = 2 ** (1.0 / s)
    # threshold variable is the contrast threshold. Set to at least 1

    # Standard deviations for Gaussian smoothing
    kvec1 = np.array([1.3, 1.6, 1.6 * k, 1.6 * (k ** 2), 1.6 * (k ** 3), 1.6 * (k ** 4)])
    kvec2 = np.array([1.6 * (k ** 2), 1.6 * (k ** 3), 1.6 * (k ** 4), 1.6 * (k ** 5), 1.6 * (k ** 6), 1.6 * (k ** 7)])
    kvec3 = np.array([1.6 * (k ** 5), 1.6 * (k ** 6), 1.6 * (k ** 7), 1.6 * (k ** 8), 1.6 * (k ** 9), 1.6 * (k ** 10)])
    kvec4 = np.array([1.6 * (k ** 8), 1.6 * (k ** 9), 1.6 * (k ** 10), 1.6 * (k ** 11), 1.6 * (k ** 12), 1.6 * (k ** 13)])
    kvectotal = np.array([1.6, 1.6 * k, 1.6 * (k ** 2), 1.6 * (k ** 3), 1.6 * (k ** 4), 1.6 * (k ** 5), 1.6 * (k ** 6), 1.6 * (k ** 7), 1.6 * (k ** 8), 1.6 * (k ** 9), 1.6 * (k ** 10), 1.6 * (k ** 11)])

    # Downsampling images
    doubled = misc.imresize(original, 200, 'bilinear').astype(int)
    normal = misc.imresize(doubled, 50, 'bilinear').astype(int)
    halved = misc.imresize(normal, 50, 'bilinear').astype(int)
    quartered = misc.imresize(halved, 50, 'bilinear').astype(int)

    # Initialize Gaussian pyramids
    pyrlvl1 = np.zeros((doubled.shape[0], doubled.shape[1], 6))
    pyrlvl2 = np.zeros((normal.shape[0], normal.shape[1], 6))
    pyrlvl3 = np.zeros((halved.shape[0], halved.shape[1], 6))
    pyrlvl4 = np.zeros((quartered.shape[0], quartered.shape[1], 6))

    print("Constructing pyramids...")

    # Construct Gaussian pyramids
    for i in range(0, 6):
        pyrlvl1[:,:,i] = ndimage.filters.gaussian_filter(doubled, kvec1[i])
        pyrlvl2[:,:,i] = misc.imresize(ndimage.filters.gaussian_filter(doubled, kvec2[i]), 50, 'bilinear') 
        pyrlvl3[:,:,i] = misc.imresize(ndimage.filters.gaussian_filter(doubled, kvec3[i]), 25, 'bilinear')
        pyrlvl4[:,:,i] = misc.imresize(ndimage.filters.gaussian_filter(doubled, kvec4[i]), 1.0 / 8.0, 'bilinear')

    # Initialize Difference-of-Gaussians (DoG) pyramids
    diffpyrlvl1 = np.zeros((doubled.shape[0], doubled.shape[1], 5))
    diffpyrlvl2 = np.zeros((normal.shape[0], normal.shape[1], 5))
    diffpyrlvl3 = np.zeros((halved.shape[0], halved.shape[1], 5))
    diffpyrlvl4 = np.zeros((quartered.shape[0], quartered.shape[1], 5))

    # Construct DoG pyramids
    for i in range(0, 5):
        diffpyrlvl1[:,:,i] = pyrlvl1[:,:,i+1] - pyrlvl1[:,:,i]
        diffpyrlvl2[:,:,i] = pyrlvl2[:,:,i+1] - pyrlvl2[:,:,i]
        diffpyrlvl3[:,:,i] = pyrlvl3[:,:,i+1] - pyrlvl3[:,:,i]
        diffpyrlvl4[:,:,i] = pyrlvl4[:,:,i+1] - pyrlvl4[:,:,i]

    # Initialize pyramids to store extrema locations
    extrpyrlvl1 = np.zeros((doubled.shape[0], doubled.shape[1], 3))
    extrpyrlvl2 = np.zeros((normal.shape[0], normal.shape[1], 3))
    extrpyrlvl3 = np.zeros((halved.shape[0], halved.shape[1], 3))
    extrpyrlvl4 = np.zeros((quartered.shape[0], quartered.shape[1], 3))

    print("Starting extrema detection...")
    print("First octave")

    # In each of the following for loops, elements of each pyramid that are larger or
    # smaller than their 26 immediate neighbors in space and scale are labeled as
    # extrema. As explained in section 4 of Lowe's paper, these initial extrema are
    # pruned by checking that their contrast and curvature are above certain
    # thresholds. The thresholds used here are those suggested by Lowe.

    for i in range(1, 4):
        for j in range(80, doubled.shape[0] - 80):
            for k in range(80, doubled.shape[1] - 80):
                if np.absolute(diffpyrlvl1[j, k, i]) < threshold:
                    continue	    

                maxbool = (diffpyrlvl1[j, k, i] > 0)
                minbool = (diffpyrlvl1[j, k, i] < 0)

                for di in range(-1, 2):
                    for dj in range(-1, 2):
                        for dk in range(-1, 2):
                            if di == 0 and dj == 0 and dk == 0:
                                continue
                            maxbool = maxbool and (diffpyrlvl1[j, k, i] > diffpyrlvl1[j + dj, k + dk, i + di])
                            minbool = minbool and (diffpyrlvl1[j, k, i] < diffpyrlvl1[j + dj, k + dk, i + di])
                            if not maxbool and not minbool:
                                break

                        if not maxbool and not minbool:
                            break

                    if not maxbool and not minbool:
                        break

                if maxbool or minbool:
                    dx = (diffpyrlvl1[j, k+1, i] - diffpyrlvl1[j, k-1, i]) * 0.5 / 255
                    dy = (diffpyrlvl1[j+1, k, i] - diffpyrlvl1[j-1, k, i]) * 0.5 / 255
                    ds = (diffpyrlvl1[j, k, i+1] - diffpyrlvl1[j, k, i-1]) * 0.5 / 255
                    dxx = (diffpyrlvl1[j, k+1, i] + diffpyrlvl1[j, k-1, i] - 2 * diffpyrlvl1[j, k, i]) * 1.0 / 255        
                    dyy = (diffpyrlvl1[j+1, k, i] + diffpyrlvl1[j-1, k, i] - 2 * diffpyrlvl1[j, k, i]) * 1.0 / 255          
                    dss = (diffpyrlvl1[j, k, i+1] + diffpyrlvl1[j, k, i-1] - 2 * diffpyrlvl1[j, k, i]) * 1.0 / 255
                    dxy = (diffpyrlvl1[j+1, k+1, i] - diffpyrlvl1[j+1, k-1, i] - diffpyrlvl1[j-1, k+1, i] + diffpyrlvl1[j-1, k-1, i]) * 0.25 / 255 
                    dxs = (diffpyrlvl1[j, k+1, i+1] - diffpyrlvl1[j, k-1, i+1] - diffpyrlvl1[j, k+1, i-1] + diffpyrlvl1[j, k-1, i-1]) * 0.25 / 255 
                    dys = (diffpyrlvl1[j+1, k, i+1] - diffpyrlvl1[j-1, k, i+1] - diffpyrlvl1[j+1, k, i-1] + diffpyrlvl1[j-1, k, i-1]) * 0.25 / 255  

                    dD = np.matrix([[dx], [dy], [ds]])
                    H = np.matrix([[dxx, dxy, dxs], [dxy, dyy, dys], [dxs, dys, dss]])
                    x_hat = np.linalg.lstsq(H, dD, rcond=None)[0]
                    D_x_hat = diffpyrlvl1[j, k, i] + 0.5 * np.dot(dD.transpose(), x_hat)

                    r = 10.0
                    if ((((dxx + dyy) ** 2) * r) < (dxx * dyy - (dxy ** 2)) * (((r + 1) ** 2))) and (np.absolute(x_hat[0]) < 0.5) and (np.absolute(x_hat[1]) < 0.5) and (np.absolute(x_hat[2]) < 0.5) and (np.absolute(D_x_hat) > 0.03):
                        extrpyrlvl1[j, k, i - 1] = 1

    print("Second octave")

    for i in range(1, 4):
        for j in range(40, normal.shape[0] - 40):
            for k in range(40, normal.shape[1] - 40):
                if np.absolute(diffpyrlvl2[j, k, i]) < threshold:
                    continue	    

                maxbool = (diffpyrlvl2[j, k, i] > 0)
                minbool = (diffpyrlvl2[j, k, i] < 0)

                for di in range(-1, 2):
                    for dj in range(-1, 2):
                        for dk in range(-1, 2):
                            if di == 0 and dj == 0 and dk == 0:
                                continue
                            maxbool = maxbool and (diffpyrlvl2[j, k, i] > diffpyrlvl2[j + dj, k + dk, i + di])
                            minbool = minbool and (diffpyrlvl2[j, k, i] < diffpyrlvl2[j + dj, k + dk, i + di])
                            if not maxbool and not minbool:
                                break

                        if not maxbool and not minbool:
                            break

                    if not maxbool and not minbool:
                        break

                if maxbool or minbool:
                    dx = (diffpyrlvl2[j, k+1, i] - diffpyrlvl2[j, k-1, i]) * 0.5 / 255
                    dy = (diffpyrlvl2[j+1, k, i] - diffpyrlvl2[j-1, k, i]) * 0.5 / 255
                    ds = (diffpyrlvl2[j, k, i+1] - diffpyrlvl2[j, k, i-1]) * 0.5 / 255
                    dxx = (diffpyrlvl2[j, k+1, i] + diffpyrlvl2[j, k-1, i] - 2 * diffpyrlvl2[j, k, i]) * 1.0 / 255        
                    dyy = (diffpyrlvl2[j+1, k, i] + diffpyrlvl2[j-1, k, i] - 2 * diffpyrlvl2[j, k, i]) * 1.0 / 255          
                    dss = (diffpyrlvl2[j, k, i+1] + diffpyrlvl2[j, k, i-1] - 2 * diffpyrlvl2[j, k, i]) * 1.0 / 255
                    dxy = (diffpyrlvl2[j+1, k+1, i] - diffpyrlvl2[j+1, k-1, i] - diffpyrlvl2[j-1, k+1, i] + diffpyrlvl2[j-1, k-1, i]) * 0.25 / 255 
                    dxs = (diffpyrlvl2[j, k+1, i+1] - diffpyrlvl2[j, k-1, i+1] - diffpyrlvl2[j, k+1, i-1] + diffpyrlvl2[j, k-1, i-1]) * 0.25 / 255 
                    dys = (diffpyrlvl2[j+1, k, i+1] - diffpyrlvl2[j-1, k, i+1] - diffpyrlvl2[j+1, k, i-1] + diffpyrlvl2[j-1, k, i-1]) * 0.25 / 255  

                    dD = np.matrix([[dx], [dy], [ds]])
                    H = np.matrix([[dxx, dxy, dxs], [dxy, dyy, dys], [dxs, dys, dss]])
                    x_hat = np.linalg.lstsq(H, -dD, rcond=None)[0]  # subpixel offset: -inv(H) * dD
                    D_x_hat = diffpyrlvl2[j, k, i] + 0.5 * np.dot(dD.transpose(), x_hat)

                    r = 10.0
                    if (((dxx + dyy) ** 2) * r) < (dxx * dyy - (dxy ** 2)) * (((r + 1) ** 2)) and np.absolute(x_hat[0]) < 0.5 and np.absolute(x_hat[1]) < 0.5 and np.absolute(x_hat[2]) < 0.5 and np.absolute(D_x_hat) > 0.03:
                        extrpyrlvl2[j, k, i - 1] = 1

    print("Third octave")
      
    for i in range(1, 4):
        for j in range(20, halved.shape[0] - 20):
            for k in range(20, halved.shape[1] - 20):
                if np.absolute(diffpyrlvl3[j, k, i]) < threshold:
                    continue	    

                maxbool = (diffpyrlvl3[j, k, i] > 0)
                minbool = (diffpyrlvl3[j, k, i] < 0)

                for di in range(-1, 2):
                    for dj in range(-1, 2):
                        for dk in range(-1, 2):
                            if di == 0 and dj == 0 and dk == 0:
                                continue
                            maxbool = maxbool and (diffpyrlvl3[j, k, i] > diffpyrlvl3[j + dj, k + dk, i + di])
                            minbool = minbool and (diffpyrlvl3[j, k, i] < diffpyrlvl3[j + dj, k + dk, i + di])
                            if not maxbool and not minbool:
                                break

                        if not maxbool and not minbool:
                            break

                    if not maxbool and not minbool:
                        break

                if maxbool or minbool:
                    dx = (diffpyrlvl3[j, k+1, i] - diffpyrlvl3[j, k-1, i]) * 0.5 / 255
                    dy = (diffpyrlvl3[j+1, k, i] - diffpyrlvl3[j-1, k, i]) * 0.5 / 255
                    ds = (diffpyrlvl3[j, k, i+1] - diffpyrlvl3[j, k, i-1]) * 0.5 / 255
                    dxx = (diffpyrlvl3[j, k+1, i] + diffpyrlvl3[j, k-1, i] - 2 * diffpyrlvl3[j, k, i]) * 1.0 / 255        
                    dyy = (diffpyrlvl3[j+1, k, i] + diffpyrlvl3[j-1, k, i] - 2 * diffpyrlvl3[j, k, i]) * 1.0 / 255          
                    dss = (diffpyrlvl3[j, k, i+1] + diffpyrlvl3[j, k, i-1] - 2 * diffpyrlvl3[j, k, i]) * 1.0 / 255
                    dxy = (diffpyrlvl3[j+1, k+1, i] - diffpyrlvl3[j+1, k-1, i] - diffpyrlvl3[j-1, k+1, i] + diffpyrlvl3[j-1, k-1, i]) * 0.25 / 255 
                    dxs = (diffpyrlvl3[j, k+1, i+1] - diffpyrlvl3[j, k-1, i+1] - diffpyrlvl3[j, k+1, i-1] + diffpyrlvl3[j, k-1, i-1]) * 0.25 / 255 
                    dys = (diffpyrlvl3[j+1, k, i+1] - diffpyrlvl3[j-1, k, i+1] - diffpyrlvl3[j+1, k, i-1] + diffpyrlvl3[j-1, k, i-1]) * 0.25 / 255  

                    dD = np.matrix([[dx], [dy], [ds]])
                    H = np.matrix([[dxx, dxy, dxs], [dxy, dyy, dys], [dxs, dys, dss]])
                    x_hat = np.linalg.lstsq(H, -dD, rcond=None)[0]  # subpixel offset: -inv(H) * dD
                    D_x_hat = diffpyrlvl3[j, k, i] + 0.5 * np.dot(dD.transpose(), x_hat)

                    r = 10.0
                    if (((dxx + dyy) ** 2) * r) < (dxx * dyy - (dxy ** 2)) * (((r + 1) ** 2)) and np.absolute(x_hat[0]) < 0.5 and np.absolute(x_hat[1]) < 0.5 and np.absolute(x_hat[2]) < 0.5 and np.absolute(D_x_hat) > 0.03:
                        extrpyrlvl3[j, k, i - 1] = 1


    print("Fourth octave")

    for i in range(1, 4):
        for j in range(10, quartered.shape[0] - 10):
            for k in range(10, quartered.shape[1] - 10):
                if np.absolute(diffpyrlvl4[j, k, i]) < threshold:
                    continue	    

                maxbool = (diffpyrlvl4[j, k, i] > 0)
                minbool = (diffpyrlvl4[j, k, i] < 0)

                for di in range(-1, 2):
                    for dj in range(-1, 2):
                        for dk in range(-1, 2):
                            if di == 0 and dj == 0 and dk == 0:
                                continue
                            maxbool = maxbool and (diffpyrlvl4[j, k, i] > diffpyrlvl4[j + dj, k + dk, i + di])
                            minbool = minbool and (diffpyrlvl4[j, k, i] < diffpyrlvl4[j + dj, k + dk, i + di])
                            if not maxbool and not minbool:
                                break

                        if not maxbool and not minbool:
                            break

                    if not maxbool and not minbool:
                        break

                if maxbool or minbool:
                    dx = (diffpyrlvl4[j, k+1, i] - diffpyrlvl4[j, k-1, i]) * 0.5 / 255
                    dy = (diffpyrlvl4[j+1, k, i] - diffpyrlvl4[j-1, k, i]) * 0.5 / 255
                    ds = (diffpyrlvl4[j, k, i+1] - diffpyrlvl4[j, k, i-1]) * 0.5 / 255
                    dxx = (diffpyrlvl4[j, k+1, i] + diffpyrlvl4[j, k-1, i] - 2 * diffpyrlvl4[j, k, i]) * 1.0 / 255
                    dyy = (diffpyrlvl4[j+1, k, i] + diffpyrlvl4[j-1, k, i] - 2 * diffpyrlvl4[j, k, i]) * 1.0 / 255
                    dss = (diffpyrlvl4[j, k, i+1] + diffpyrlvl4[j, k, i-1] - 2 * diffpyrlvl4[j, k, i]) * 1.0 / 255
                    dxy = (diffpyrlvl4[j+1, k+1, i] - diffpyrlvl4[j+1, k-1, i] - diffpyrlvl4[j-1, k+1, i] + diffpyrlvl4[j-1, k-1, i]) * 0.25 / 255
                    dxs = (diffpyrlvl4[j, k+1, i+1] - diffpyrlvl4[j, k-1, i+1] - diffpyrlvl4[j, k+1, i-1] + diffpyrlvl4[j, k-1, i-1]) * 0.25 / 255
                    dys = (diffpyrlvl4[j+1, k, i+1] - diffpyrlvl4[j-1, k, i+1] - diffpyrlvl4[j+1, k, i-1] + diffpyrlvl4[j-1, k, i-1]) * 0.25 / 255

                    dD = np.matrix([[dx], [dy], [ds]])
                    H = np.matrix([[dxx, dxy, dxs], [dxy, dyy, dys], [dxs, dys, dss]])
                    x_hat = np.linalg.lstsq(H, -dD, rcond=None)[0]  # subpixel offset: -inv(H) * dD
                    D_x_hat = diffpyrlvl4[j, k, i] + 0.5 * np.dot(dD.transpose(), x_hat)

                    r = 10.0
                    if (((dxx + dyy) ** 2) * r) < (dxx * dyy - (dxy ** 2)) * (((r + 1) ** 2)) and np.absolute(x_hat[0]) < 0.5 and np.absolute(x_hat[1]) < 0.5 and np.absolute(x_hat[2]) < 0.5 and np.absolute(D_x_hat) > 0.03:
                        extrpyrlvl4[j, k, i - 1] = 1


    print("Number of extrema in first octave: %d" % np.sum(extrpyrlvl1))
    print("Number of extrema in second octave: %d" % np.sum(extrpyrlvl2))
    print("Number of extrema in third octave: %d" % np.sum(extrpyrlvl3))
    print("Number of extrema in fourth octave: %d" % np.sum(extrpyrlvl4))
    
    # Gradient magnitude and orientation for each image sample point at each scale
    magpyrlvl1 = np.zeros((doubled.shape[0], doubled.shape[1], 3))
    magpyrlvl2 = np.zeros((normal.shape[0], normal.shape[1], 3))
    magpyrlvl3 = np.zeros((halved.shape[0], halved.shape[1], 3))
    magpyrlvl4 = np.zeros((quartered.shape[0], quartered.shape[1], 3))

    oripyrlvl1 = np.zeros((doubled.shape[0], doubled.shape[1], 3))
    oripyrlvl2 = np.zeros((normal.shape[0], normal.shape[1], 3))
    oripyrlvl3 = np.zeros((halved.shape[0], halved.shape[1], 3))
    oripyrlvl4 = np.zeros((quartered.shape[0], quartered.shape[1], 3))
    
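    # Central-difference gradient magnitude and a 36-bin orientation index
    # (the angle is mapped from (-pi, pi] to [0, 36) via (36 / (2*pi)) * (pi + angle)).
    # Note the gradients are taken on each octave's base image, so all three
    # scale slices within an octave hold identical values.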
    for i in range(0, 3):
        for j in range(1, doubled.shape[0] - 1):
            for k in range(1, doubled.shape[1] - 1):
                magpyrlvl1[j, k, i] = ( ((doubled[j+1, k] - doubled[j-1, k]) ** 2) + ((doubled[j, k+1] - doubled[j, k-1]) ** 2) ) ** 0.5   
                oripyrlvl1[j, k, i] = (36 / (2 * np.pi)) * (np.pi + np.arctan2((doubled[j, k+1] - doubled[j, k-1]), (doubled[j+1, k] - doubled[j-1, k])))        
                
    for i in range(0, 3):
        for j in range(1, normal.shape[0] - 1):
            for k in range(1, normal.shape[1] - 1):
                magpyrlvl2[j, k, i] = ( ((normal[j+1, k] - normal[j-1, k]) ** 2) + ((normal[j, k+1] - normal[j, k-1]) ** 2) ) ** 0.5   
                oripyrlvl2[j, k, i] = (36 / (2 * np.pi)) * (np.pi + np.arctan2((normal[j, k+1] - normal[j, k-1]), (normal[j+1, k] - normal[j-1, k])))    

    for i in range(0, 3):
        for j in range(1, halved.shape[0] - 1):
            for k in range(1, halved.shape[1] - 1):
                magpyrlvl3[j, k, i] = ( ((halved[j+1, k] - halved[j-1, k]) ** 2) + ((halved[j, k+1] - halved[j, k-1]) ** 2) ) ** 0.5   
                oripyrlvl3[j, k, i] = (36 / (2 * np.pi)) * (np.pi + np.arctan2((halved[j, k+1] - halved[j, k-1]), (halved[j+1, k] - halved[j-1, k])))    

    for i in range(0, 3):
        for j in range(1, quartered.shape[0] - 1):
            for k in range(1, quartered.shape[1] - 1):
                magpyrlvl4[j, k, i] = ( ((quartered[j+1, k] - quartered[j-1, k]) ** 2) + ((quartered[j, k+1] - quartered[j, k-1]) ** 2) ) ** 0.5   
                oripyrlvl4[j, k, i] = (36 / (2 * np.pi)) * (np.pi + np.arctan2((quartered[j, k+1] - quartered[j, k-1]), (quartered[j+1, k] - quartered[j-1, k])))    

    extr_sum = np.sum(extrpyrlvl1) + np.sum(extrpyrlvl2) + np.sum(extrpyrlvl3) + np.sum(extrpyrlvl4)
    keypoints = np.zeros((int(extr_sum), 4))

    print("Calculating keypoint orientations...")

    count = 0
    
    for i in range(0, 3):
        for j in range(80, doubled.shape[0] - 80):
            for k in range(80, doubled.shape[1] - 80):
                if extrpyrlvl1[j, k, i] == 1:
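                    # Orientation assignment: gradient magnitudes in a circular window
                    # are weighted by a Gaussian with sigma = 1.5 * scale and binned
                    # into a 36-bin orientation histogram.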
                    gaussian_window = multivariate_normal(mean=[j, k], cov=((1.5 * kvectotal[i]) ** 2))
                    two_sd = np.floor(2 * 1.5 * kvectotal[i])
                    orient_hist = np.zeros([36,1])
                    for x in range(int(-1 * two_sd * 2), int(two_sd * 2) + 1):
                        ylim = int((((two_sd * 2) ** 2) - (np.absolute(x) ** 2)) ** 0.5)
                        for y in range(-1 * ylim, ylim + 1):
                            if j + x < 0 or j + x > doubled.shape[0] - 1 or k + y < 0 or k + y > doubled.shape[1] - 1:
                                continue
                            weight = magpyrlvl1[j + x, k + y, i] * gaussian_window.pdf([j + x, k + y])
                            bin_idx = int(np.clip(np.floor(oripyrlvl1[j + x, k + y, i]), 0, 35))
                            orient_hist[bin_idx] += weight
                    
                    maxval = np.amax(orient_hist)
                    maxidx = np.argmax(orient_hist)
                    keypoints[count, :] = np.array([int(j * 0.5), int(k * 0.5), kvectotal[i], maxidx])
                    count += 1
                    orient_hist[maxidx] = 0
                    newmaxval = np.amax(orient_hist)
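                    # 80% rule: every remaining histogram peak above 0.8 * maxval
                    # yields an extra keypoint at the same location and scale.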
                    while newmaxval > 0.8 * maxval:
                        newmaxidx = np.argmax(orient_hist)
                        keypoints = np.append(keypoints, np.array([[int(j * 0.5), int(k * 0.5), kvectotal[i], newmaxidx]]), axis=0)
                        orient_hist[newmaxidx] = 0
                        newmaxval = np.amax(orient_hist)
    
                    
    for i in range(0, 3):
        for j in range(40, normal.shape[0] - 40):
            for k in range(40, normal.shape[1] - 40):
                if extrpyrlvl2[j, k, i] == 1:
                    gaussian_window = multivariate_normal(mean=[j, k], cov=((1.5 * kvectotal[i + 3]) ** 2))
                    two_sd = np.floor(2 * 1.5 * kvectotal[i + 3])
                    orient_hist = np.zeros([36,1])
                    for x in range(int(-1 * two_sd), int(two_sd + 1)):
                        ylim = int(((two_sd ** 2) - (np.absolute(x) ** 2)) ** 0.5)
                        for y in range(-1 * ylim, ylim + 1):
                            if j + x < 0 or j + x > normal.shape[0] - 1 or k + y < 0 or k + y > normal.shape[1] - 1:
                                continue
                            weight = magpyrlvl2[j + x, k + y, i] * gaussian_window.pdf([j + x, k + y])
                            bin_idx = int(np.clip(np.floor(oripyrlvl2[j + x, k + y, i]), 0, 35))
                            orient_hist[bin_idx] += weight
                    
                    maxval = np.amax(orient_hist)
                    maxidx = np.argmax(orient_hist)
                    keypoints[count, :] = np.array([j, k, kvectotal[i + 3], maxidx])
                    count += 1
                    orient_hist[maxidx] = 0
                    newmaxval = np.amax(orient_hist)
                    while newmaxval > 0.8 * maxval:
                        newmaxidx = np.argmax(orient_hist)
                        keypoints = np.append(keypoints, np.array([[j, k, kvectotal[i + 3], newmaxidx]]), axis=0)
                        orient_hist[newmaxidx] = 0
                        newmaxval = np.amax(orient_hist)
    

    for i in range(0, 3):
        for j in range(20, halved.shape[0] - 20):
            for k in range(20, halved.shape[1] - 20):
                if extrpyrlvl3[j, k, i] == 1:
                    gaussian_window = multivariate_normal(mean=[j, k], cov=((1.5 * kvectotal[i + 6]) ** 2))
                    two_sd = np.floor(2 * 1.5 * kvectotal[i + 6])
                    orient_hist = np.zeros([36,1])
                    for x in range(int(-1 * two_sd * 0.5), int(two_sd * 0.5) + 1):
                        ylim = int((((two_sd * 0.5) ** 2) - (np.absolute(x) ** 2)) ** 0.5)
                        for y in range(-1 * ylim, ylim + 1):
                            if j + x < 0 or j + x > halved.shape[0] - 1 or k + y < 0 or k + y > halved.shape[1] - 1:
                                continue
                            weight = magpyrlvl3[j + x, k + y, i] * gaussian_window.pdf([j + x, k + y])
                            bin_idx = int(np.clip(np.floor(oripyrlvl3[j + x, k + y, i]), 0, 35))
                            orient_hist[bin_idx] += weight
                    
                    maxval = np.amax(orient_hist)
                    maxidx = np.argmax(orient_hist)
                    keypoints[count, :] = np.array([j * 2, k * 2, kvectotal[i + 6], maxidx])
                    count += 1
                    orient_hist[maxidx] = 0
                    newmaxval = np.amax(orient_hist)
                    while newmaxval > 0.8 * maxval:
                        newmaxidx = np.argmax(orient_hist)
                        keypoints = np.append(keypoints, np.array([[j * 2, k * 2, kvectotal[i + 6], newmaxidx]]), axis=0)
                        orient_hist[newmaxidx] = 0
                        newmaxval = np.amax(orient_hist)
    

    for i in range(0, 3):
        for j in range(10, quartered.shape[0] - 10):
            for k in range(10, quartered.shape[1] - 10):
                if extrpyrlvl4[j, k, i] == 1:
                    gaussian_window = multivariate_normal(mean=[j, k], cov=((1.5 * kvectotal[i + 9]) ** 2))
                    two_sd = np.floor(2 * 1.5 * kvectotal[i + 9])
                    orient_hist = np.zeros([36,1])
                    for x in range(int(-1 * two_sd * 0.25), int(two_sd * 0.25) + 1):
                        ylim = int((((two_sd * 0.25) ** 2) - (np.absolute(x) ** 2)) ** 0.5)
                        for y in range(-1 * ylim, ylim + 1):
                            if j + x < 0 or j + x > quartered.shape[0] - 1 or k + y < 0 or k + y > quartered.shape[1] - 1:
                                continue
                            weight = magpyrlvl4[j + x, k + y, i] * gaussian_window.pdf([j + x, k + y])
                            bin_idx = int(np.clip(np.floor(oripyrlvl4[j + x, k + y, i]), 0, 35))
                            orient_hist[bin_idx] += weight
                    
                    maxval = np.amax(orient_hist)
                    maxidx = np.argmax(orient_hist)
                    keypoints[count, :] = np.array([j * 4, k * 4, kvectotal[i + 9], maxidx])
                    count += 1
                    orient_hist[maxidx] = 0
                    newmaxval = np.amax(orient_hist)
                    while newmaxval > 0.8 * maxval:
                        newmaxidx = np.argmax(orient_hist)
                        keypoints = np.append(keypoints, np.array([[j * 4, k * 4, kvectotal[i + 9], newmaxidx]]), axis=0)
                        orient_hist[newmaxidx] = 0
                        newmaxval = np.amax(orient_hist)
    

    print("Calculating descriptor...")

    magpyr = np.zeros((normal.shape[0], normal.shape[1], 12))
    oripyr = np.zeros((normal.shape[0], normal.shape[1], 12))

    for i in range(0, 3):
        magmax = np.amax(magpyrlvl1[:, :, i])
        magpyr[:, :, i] = misc.imresize(magpyrlvl1[:, :, i], (normal.shape[0], normal.shape[1]), "bilinear").astype(float)
        magpyr[:, :, i] = (magmax / np.amax(magpyr[:, :, i])) * magpyr[:, :, i]  
        oripyr[:, :, i] = misc.imresize(oripyrlvl1[:, :, i], (normal.shape[0], normal.shape[1]), "bilinear").astype(int)    
        oripyr[:, :, i] = ((36.0 / np.amax(oripyr[:, :, i])) * oripyr[:, :, i]).astype(int)

    for i in range(0, 3):
        magpyr[:, :, i+3] = (magpyrlvl2[:, :, i]).astype(float)
        oripyr[:, :, i+3] = (oripyrlvl2[:, :, i]).astype(int)             
    
    for i in range(0, 3):
        magpyr[:, :, i+6] = misc.imresize(magpyrlvl3[:, :, i], (normal.shape[0], normal.shape[1]), "bilinear").astype(int)   
        oripyr[:, :, i+6] = misc.imresize(oripyrlvl3[:, :, i], (normal.shape[0], normal.shape[1]), "bilinear").astype(int)    

    for i in range(0, 3):
        magpyr[:, :, i+9] = misc.imresize(magpyrlvl4[:, :, i], (normal.shape[0], normal.shape[1]), "bilinear").astype(int)   
        oripyr[:, :, i+9] = misc.imresize(oripyrlvl4[:, :, i], (normal.shape[0], normal.shape[1]), "bilinear").astype(int)    
        

    descriptors = np.zeros([keypoints.shape[0], 128])

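    # Descriptor: a 16x16 window around each keypoint, rotated by the keypoint
    # orientation, is split into 4x4 cells; each cell accumulates an 8-bin
    # gradient-orientation histogram, giving 4 * 4 * 8 = 128 dimensions. The
    # vector is normalized, clipped at 0.2 and renormalized (Lowe 2004, section 6).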
    for i in range(0, keypoints.shape[0]): 
        for x in range(-8, 8):
            for y in range(-8, 8):
                theta = 10 * keypoints[i,3] * np.pi / 180.0
                xrot = np.round((np.cos(theta) * x) - (np.sin(theta) * y))
                yrot = np.round((np.sin(theta) * x) + (np.cos(theta) * y))
                scale_idx = np.argwhere(kvectotal == keypoints[i,2])[0][0]
                x0 = keypoints[i,0]
                y0 = keypoints[i,1]
                gaussian_window = multivariate_normal(mean=[x0,y0], cov=8) 
                weight = magpyr[int(x0 + xrot), int(y0 + yrot), scale_idx] * gaussian_window.pdf([x0 + xrot, y0 + yrot])
                angle = oripyr[int(x0 + xrot), int(y0 + yrot), scale_idx] - keypoints[i,3]
                if angle < 0:
                    angle = 36 + angle

                bin_idx = np.clip(np.floor((8.0 / 36) * angle), 0, 7).astype(int)
                descriptors[i, 32 * int((x + 8)/4) + 8 * int((y + 8)/4) + bin_idx] += weight
        
        descriptors[i, :] = descriptors[i, :] / norm(descriptors[i, :]) 
        descriptors[i, :] = np.clip(descriptors[i, :], 0, 0.2)
        descriptors[i, :] = descriptors[i, :] / norm(descriptors[i, :])
                
 
    return [keypoints, descriptors]
Example #45
0
 def __call__(self, theta):
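     # sequential-ABC importance weight: prior density divided by the
     # kernel-smoothed mixture of the previous weighted samples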
     kernel = stats.multivariate_normal(theta, self.sigma).pdf
     w = self.prior(theta) / np.sum(self.ws * kernel(self.samples))
     return w
Example #46
0
 def d_lik(self, x, y):
     """ Y_t | X_t """
     return stats.multivariate_normal(self.model_nu, self.model_tau).pdf(y-x)
Example #47
0
mprior = Nonparametric(CSI.T)


# KDE-based proposal
def kde_proposal(CSI):
    return mprior.sample(n_samples=nsamples)


from utils import alltimes, history
timesteps = [1812, 2421, 3029]  # chosen timesteps for Bayesian inference

# history-based uncertainty mitigation
for i, t in enumerate(timesteps, 1):
    dobs = history[alltimes == t, :].flatten()
    dprior = multivariate_normal(mean=dobs, cov=.1)

    # likelihood under perfect forwarding assumption
    def lnlike(csi):
        m = kpca.predict(csi).clip(0, 1)
        return dprior.logpdf(G(m, [t]))

    # posterior sigma_m(m) ~ rho_d(G(m)) * rho_m(m)
    def lnprob(csi):
        m = kpca.predict(csi).clip(0, 1)
        return mprior.logpdf(csi) + dprior.logpdf(G(m, [t]))

    if pool.is_master():
        ### There are two possible configurations:

        # a) (symmetric) stretch move
Example #48
0
def tidalsite():

    MinDist = 20
    dx = MinDist * 4
    dy = dx / 4
    pos = []
    for i in range(5):
        for j in range(5):
            if not (i) % 2:
                temp = [i * dx, j * dy - dy / 2]
            else:
                temp = [i * dx, j * dy]

            pos.append(temp)

    pos = [item for item in pos if item[1] >= 0]

    # #x,y coordinate of the statistical analysis
    # # Statistical analysis generation
    # # --------------------------------
    x = np.linspace(0., 1000., 100)
    y = np.linspace(0., 300., 30)

    # Lease Area
    leaseAreaVertexUTM = np.array(
        [[50., 50.], [950., 50.], [950., 250.], [50., 250.]], dtype=float)

    # Nogo areas
    Nogoareas_wave = []

    nx = len(x)
    ny = len(y)

    # Tidal time series
    time_points = 1

    rv = norm()
    time_pdf = rv.pdf(np.linspace(-2, 2, time_points))
    time_scaled = time_pdf * (1. / np.amax(time_pdf))

    xgrid, ygrid = np.meshgrid(x, y)
    pos = np.dstack((xgrid, ygrid))

    rv = multivariate_normal(
        [x.mean(), y.mean()],
        [[max(x) * 5., max(y) * 2.], [max(y) * 2., max(x) * 5.]])

    #u_max = 10.
    u_max = 5.
    v_max = 1.
    ssh_max = 1.

    grid_pdf = rv.pdf(pos)

    u_scaled = grid_pdf * (u_max / np.amax(grid_pdf))
    v_scaled = np.ones((ny, nx)) * v_max
    ssh_scaled = grid_pdf * (ssh_max / np.amax(grid_pdf))

    u_arrays = []
    v_arrays = []
    ssh_arrays = []

    for multiplier in time_scaled:

        u_arrays.append(u_scaled * multiplier)
        v_arrays.append(v_scaled * multiplier)
        ssh_arrays.append(ssh_scaled * multiplier)

    U = np.dstack(u_arrays)
    V = np.dstack(v_arrays)
    SSH = np.dstack(ssh_arrays)
    U = U * 0 + 2
    V = V * 0 + 0
    TI = np.array([0.1])
    p = np.ones(U.shape[-1])

    # END of Statistical analysis generation
    # ---------------------------------------
    Meteocean = {'V': V, 'U': U, 'p': p, 'TI': TI, 'x': x, 'y': y, 'SSH': SSH}
    VelocityShear = np.array([7.])
    MainDirection = None  #np.array([1.,1.])
    #ang = np.pi*0.25
    #MainDirection = np.array([np.cos(ang),np.sin(ang)])

    #Temp check nogo areas
    #xb = np.linspace(0,100,10)
    #yb = np.linspace(0,50,5)

    Bathymetry = np.array([-60.])
    Geophysics = np.array([0.3])

    BR = 1.
    electrical_connection_point = (-1000.0, -4000.0)

    out = [
        leaseAreaVertexUTM, Nogoareas_wave, Meteocean, VelocityShear,
        MainDirection, Bathymetry, Geophysics, BR, electrical_connection_point
    ]

    return out
Example #49
0
    else:
        #myself
        n_iteration = 100
        n, d = data.shape
        mu1 = np.random.standard_normal(d)
        print(mu1)
        mu2 = np.random.standard_normal(d)
        print(mu2)
        sigma1 = np.identity(
            d
        )  #The identity array is a square array with ones on the main diagonal.
        sigma2 = np.identity(d)
        pi = 0.5
        for i in range(n_iteration):
            norm1 = multivariate_normal(mu1, sigma1)
            norm2 = multivariate_normal(mu2, sigma2)
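            #e-step: posterior responsibility of each point for component 1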
            tau1 = pi * norm1.pdf(data)
            tau2 = (1 - pi) * norm2.pdf(data)
            gamma = tau1 / (tau1 + tau2)

            #m-step
            mu1 = np.dot(gamma, data) / np.sum(gamma)
            mu2 = np.dot((1 - gamma), data) / np.sum(1 - gamma)
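            # note: using `data` (rather than `data - mu`) as the second factor is
            # equivalent here, since the gamma-weighted residuals sum to zero once
            # mu has been updated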
            sigma1 = np.dot(gamma * (data - mu1).T, data) / np.sum(gamma)
            sigma2 = np.dot(
                (1 - gamma) * (data - mu2).T, data) / np.sum(1 - gamma)
            pi = np.sum(gamma) / n
            print(i, ":\t", mu1, mu2)
        print('class probabilities:\t', pi)
        print('means:\t', mu1, mu2)
Example #50
0
 def d_init(self, x):
     """ X_0 """
     return stats.multivariate_normal(self.model_mu,self.model_sigma).pdf(x)
Example #51
0
			if((cache[j] != crude_data[i]) and \
					(output[i, j, 0] > LRU_before)):
				output[i, j, 0] -= 1;
	
	factor = np.zeros((2));
	for j in range(num_bucket):
		f_value[j] = output[i, j, 1];
		if(f_value[j] > 100):
			f_value[j] = 100;
	
	factor[0] = np.mean(f_value);
	factor[1] = np.var(f_value);
	# Update distances (in this case, possibility)
	for label in range(num_label):
		if(mu[label][0] != 0):
			rv = multivariate_normal(mean = mu[label], cov = sigma[label]);
			distance[label] = np.log(pi[label]) +rv.logpdf(factor);
		else:
			distance[label] = -99999;
	"""
	for j in range(num_bucket):
#print(j);
#		print(mu[j]);
#		print(sigma[j]);
		if(mu[j][0] != 0):
			distance[j] = rv.logpdf(
			constant =  1 / (pow(2 * math.pi, 2) * pow(np.linalg.det(sigma[j]), 1/2));
			exp = -1/2 * np.dot(inv(sigma[j]), (factor - mu[j]));
			exp = np.dot((factor - mu[j]).transpose(), exp);
			distance[j] = constant * np.exp(exp);
		else:
Example #52
0
 def gen_gaussian(mean_in, cov_in, class_label, num):
     nv = multivariate_normal(mean=mean_in, cov=cov_in)
     X = nv.rvs(num)
     y = np.ones(num, dtype=float) * class_label
     return nv, X, y
Example #53
0
    def update(self, agents, t, sleeping=False):
        if len(
                self.replay_buffer
        ) < self.max_replay_buffer_len:  # replay buffer is not large enough
            return
        if not t % 100 == 0:  # update every 100 steps
            return

        smallest_batch_index = 0
        smallest_batch_size = -1
        for i in range(self.n):
            if (smallest_batch_size == -1
                    or len(agents[i].replay_buffer) < smallest_batch_size):
                smallest_batch_size = len(agents[i].replay_buffer)
                smallest_batch_index = i

        # Different agents have different amounts of experience, need to use the smallest to get list of experience from memory
        self.replay_sample_index = agents[
            smallest_batch_index].replay_buffer.make_index(
                self.args.batch_size)
        obs_n = []
        obs_next_n = []
        act_n = []
        index = self.replay_sample_index
        for i in range(self.n):
            obs, act, mask, rew, obs_next, done = agents[
                i].replay_buffer.sample_index(index)
            obs_n.append(obs)
            obs_next_n.append(obs_next)
            act_n.append(act)
        obs, act, masks, rew, obs_next, done = self.replay_buffer.sample_index(
            index)

        rew = rew.flatten()

        signal = False
        mir_penalty = 0
        if (self.mic > 0 and (not self.args.sleep_regimen or
                              (self.args.sleep_regimen and sleeping))
            ):  # If sleep regimen is on, only use mic when sleeping
            try:

                multivar = multivariate_normal(self.multivariate_mean,
                                               self.multivariate_cov,
                                               allow_singular=True)
                logp_phi = multivar.logpdf(act)
                logp_phi = logp_phi.reshape(self.args.batch_size, )

                p_phi = multivar.pdf(act)
                p_phi = p_phi.reshape(self.args.batch_size, )
                action_mean = np.mean(act, axis=0)
                action_mean = action_mean + 1e-6  # add small value so probabilities close to zero arent problematic
                action_mean = action_mean / np.sum(action_mean)  # normalize
                action_std = np.std(act / np.sum(act), axis=0)
                action_cov = np.diag(action_std)
                policy_multivar = multivariate_normal(action_mean,
                                                      action_cov,
                                                      allow_singular=True)
                #policy_multivar = multivariate_normal(action_mean, self.multivariate_cov, allow_singular=True)
                logp_pi = policy_multivar.logpdf(act)
                logp_pi = logp_pi.reshape(self.args.batch_size, )

                p_pi = policy_multivar.pdf(act)
                p_pi = p_pi.reshape(self.args.batch_size, )

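                # unnormalized entropy estimates: sums of negative log-densities
                # of the sampled actions under the fixed prior (phi) and the
                # diagonal Gaussian fitted to the current batch (pi)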
                phi_entropy = -1 * np.sum(logp_phi)  # * p_phi)
                pi_entropy = -1 * np.sum(logp_pi)  # * p_pi)

                mir_penalty = self.mic * (phi_entropy - pi_entropy)
            except Exception:
                # fall back to no penalty if the density fit is degenerate
                mir_penalty = 0

        num_sample = 1
        target_q = 0.0
        for i in range(num_sample):
            target_act_next_n = [
                agents[i].p_debug['target_act'](obs_next_n[i])
                for i in range(self.n)
            ]
            target_q_next = self.q_debug['target_q_values'](
                *(obs_next_n + target_act_next_n))
            target_q += (rew - mir_penalty
                         ) + self.args.gamma * (1.0 - done) * target_q_next
        target_q /= num_sample

        q_loss = self.q_train(*(obs_n + act_n + [target_q]))

        # train p network
        p_loss = self.p_train(*(obs_n + act_n))

        self.p_update()
        self.q_update()

        return [
            q_loss, p_loss,
            np.mean(target_q),
            np.mean(rew),
            np.mean(target_q_next),
            np.std(target_q)
        ]
Example #54
0
def GetData(img_size, max_path_width, show=False):
    num_per_edge = random.randint(1, 2)

    w, h = img_size
    ww, hh = int(w / 8.0), int(h / 8.0)
    pad_w = math.ceil(1.0 / num_per_edge * w / 4)
    pad_h = math.ceil(1.0 / num_per_edge * h / 4)
    l = [(0,
          random.randint(
              int(1.0 / num_per_edge * h * i) + pad_h,
              int(1.0 / num_per_edge * h * (i + 1)) - pad_h))
         for i in range(num_per_edge)]
    u = [(random.randint(
        int(1.0 / num_per_edge * w * i) + pad_w,
        int(1.0 / num_per_edge * w * (i + 1)) - pad_w), 0)
         for i in range(num_per_edge)]
    r = [(w,
          random.randint(
              int(1.0 / num_per_edge * h * i) + pad_h,
              int(1.0 / num_per_edge * h * (i + 1)) - pad_h))
         for i in range(num_per_edge)]
    d = [(random.randint(
        int(1.0 / num_per_edge * w * i) + pad_w,
        int(1.0 / num_per_edge * w * (i + 1)) - pad_w), h)
         for i in range(num_per_edge)]
    p = l + u + r + d
    while True:
        random.shuffle(p)
        segs = []
        for x, y in zip(p[:num_per_edge * 2], p[num_per_edge * 2:]):
            if x[0] == 0 and y[0] == 0 or x[0] == w and y[0] == w or x[
                    1] == 0 and y[1] == 0 or x[1] == h and y[1] == h:
                continue
            segs.append([x, y])
        if segs:
            break

    img = Image.new('RGB', img_size, color=(255, 255, 255))
    road_color = (random.randint(0, 255), random.randint(0, 255),
                  random.randint(0, 255))
    draw = ImageDraw.Draw(img)
    dirfld = np.zeros((h, w, 2), np.float32)
    pts = []
    for seg in segs:
        seg_w = random.randint(int(max_path_width * 0.6), max_path_width)
        seg1, seg2 = direction(seg, seg_w)
        dirfld += dir_field(seg1, w, h, seg_w)
        dirfld += dir_field(seg2, w, h, seg_w)
        draw.line(extend_seg(seg), fill=road_color, width=seg_w)
        # draw.ellipse(make_ellipse(seg[0]), fill = (128, 128, 128), outline = (0, 0, 0))
        # draw.ellipse(make_ellipse(seg[1]), fill = (128, 128, 128), outline = (0, 0, 0))
        pts.append(seg[0])
        pts.append(seg[1])
    for i in range(len(segs)):
        for j in range(i, len(segs)):
            inter = get_crossing(segs[i], segs[j])
            if inter:
                pts.append(inter)
                # draw.ellipse(make_ellipse(inter), fill = (128, 128, 128), outline = (0, 0, 0))
    img = pepper(np.array(img))

    if show:
        plt.figure()
        plt.imshow(img)
        plt.show()

    Y, X = np.mgrid[0:ww, 0:hh]
    U = cv2.resize(dirfld[..., 0],
                   dsize=(ww, hh),
                   interpolation=cv2.INTER_LINEAR)
    V = cv2.resize(dirfld[..., 1],
                   dsize=(ww, hh),
                   interpolation=cv2.INTER_LINEAR)
    N = np.sqrt(U**2 + V**2)
    N[N < 1e-6] = np.inf
    U, V = U / N, V / N

    if show:
        plt.figure()
        Q = plt.quiver(X, 32 - Y, U, -V)
        plt.axis('equal')
        plt.show()

    pos = np.empty((hh, ww, 2))
    pos[..., 1], pos[..., 0] = np.mgrid[0:ww, 0:hh]
    heatmap = np.zeros((hh, ww))
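    # ground-truth heatmap: place a unit-covariance Gaussian bump at each
    # endpoint/crossing (coordinates downscaled by 8 to the output grid) and
    # combine overlapping bumps with an elementwise maximum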
    for pt in pts:
        rv = multivariate_normal(
            np.array(pt) / 8 - np.array([0.5, 0.5]), [[1, 0], [0, 1]])
        heatmap = np.maximum(heatmap, rv.pdf(pos))
    heatmap = (heatmap - heatmap.min()) / (heatmap.max() - heatmap.min())

    if show:
        plt.figure()
        plt.imshow(heatmap)
        plt.axis('equal')
        plt.show()

    gt = np.zeros((hh, ww, 3))
    gt[..., 0], gt[..., 1], gt[..., 2] = heatmap, U, V
    mask = U**2 + V**2
    mask[mask < 0.5] = 0
    mask[mask >= 0.5] = 1
    if show:
        plt.figure()
        plt.imshow(mask)
        plt.axis('equal')
        plt.show()
    return img, gt, mask
Example #55
0
def class_multivariate_normal(mu, cov_matrix):
    N = multivariate_normal(mu, cov_matrix)
    return N
Example #56
0
def calculate_gmm(
    header, face_dict
):  #return a set of GMM models corresponding to a range of number of clusters
    #also return BIC score as an estimator of the best number of clusters
    plt.figure(figsize=(5, 5))

    features = [' AU06_r', ' AU12_r']
    outfile = 'clusters.csv'
    #df = pd.read_csv('example/interrogator.csv')
    df = pd.read_csv('example/all_frames.csv')
    X = df.loc[:, features].dropna().values

    #gmm_list = []
    lowest_bic = np.inf
    bic = []
    n_components_range = range(1, 11)  #initially 1-12
    #n_components_range = range(5, 6)
    cv_types = ['spherical', 'tied', 'diag', 'full']
    cv_types = ['full']
    for cv_type in cv_types:
        for n_components in n_components_range:
            # Fit a Gaussian mixture with EM
            gmm = mixture.GaussianMixture(
                n_components=n_components,
                covariance_type=cv_type,
                tol=1e-8,
                #tol=1e-6,
                max_iter=1000,
                #max_iter=100,
                n_init=3,
                reg_covar=2e-3)
            gmm.fit(X)
            #gmm_list.append(gmm)
            print('n_components:', n_components, ', n-iter:', gmm.n_iter_)
            bic.append(gmm.bic(X))
            plot_gmm(gmm, X, features, 'clusterContour', bic[-1], False)

            if bic[-1] < lowest_bic:
                lowest_bic = bic[-1]
                best_gmm = gmm

            #analyze_face_result(header, face_dict, gmm)
            analyze_face_soft_result(header, face_dict, gmm)
            print(gmm.means_)
            sigmas = np.empty(gmm.covariances_.shape[0], dtype=object)
            for i in range(sigmas.shape[0]):
                sigmas[i] = gmm.covariances_[i]
            local_cluster_data = np.concatenate(
                (gmm.means_, sigmas[:, np.newaxis]), axis=1)
            df_clusters = pd.DataFrame(data=local_cluster_data,
                                       columns=features + ['sigmas'])
            currentCluster = 'cluster_' + str(n_components) + '.csv'
            df_clusters.to_csv(currentCluster, index=False)

    bic = np.array(bic)
    color_iter = itertools.cycle(
        ['navy', 'turquoise', 'cornflowerblue', 'darkorange'])
    clf = best_gmm
    bars = []

    # Plot the BIC scores
    #spl = plt.subplot(2, 1, 1)
    for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
        xpos = np.array(n_components_range) + .2 * (i - 2)
        bars.append(
            plt.bar(xpos,
                    bic[i * len(n_components_range):(i + 1) *
                        len(n_components_range)],
                    width=.2,
                    color=color))
    plt.xticks(n_components_range)
    plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
    plt.title('BIC score per model')
    xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
        .2 * np.floor(bic.argmin() / len(n_components_range))
    plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
    #spl.set_xlabel('Number of components')
    #spl.legend([b[0] for b in bars], cv_types)
    plt.xlabel('Number of components')
    plt.legend([b[0] for b in bars], cv_types)
    plt.savefig('BIC scores')

    # Plot the winner
    plt.figure(figsize=(8, 8))
    splot = plt.subplot(1, 1, 1)
    Y_ = clf.predict(X)

    for i, (mean, cov,
            color) in enumerate(zip(clf.means_, clf.covariances_, color_iter)):
        #cov = cov * np.eye(2)
        v, w = linalg.eigh(cov)
        if not np.any(Y_ == i):
            continue
        plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color, alpha=.5)
        '''
        # Plot an ellipse to show the Gaussian component
        angle = np.arctan2(w[0][1], w[0][0])
        angle = 180. * angle / np.pi  # convert to degrees
        v = 2. * np.sqrt(2.) * np.sqrt(v)
        ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
        ell.set_clip_box(splot.bbox)
        ell.set_alpha(.75)
        splot.add_artist(ell)
        '''
        x_vals = np.linspace(X[:, 0].min(), X[:, 0].max(), 50)
        y_vals = np.linspace(X[:, 1].min(), X[:, 1].max(), 50)
        x, y = np.meshgrid(x_vals, y_vals)
        pos = np.empty(x.shape + (2, ))
        pos[:, :, 0] = x
        pos[:, :, 1] = y
        rv = multivariate_normal(mean, cov)

        try:  # not sure why, was running into ValueErrors
            plt.contour(x, y, rv.pdf(pos))
        except ValueError:
            pass

    print(clf.means_)
    sigmas = np.empty(clf.covariances_.shape[0], dtype=object)
    for i in range(sigmas.shape[0]):
        sigmas[i] = clf.covariances_[i]
    cluster_data = np.concatenate((clf.means_, sigmas[:, np.newaxis]), axis=1)
    df_clusters = pd.DataFrame(data=cluster_data,
                               columns=features + ['sigmas'])
    df_clusters.to_csv(outfile, index=False)
    #plt.xticks(())
    #plt.yticks(())
    plt.title('best GMM')
    plt.xlabel(features[0])
    plt.ylabel(features[1])
    #plt.subplots_adjust(hspace=.35, bottom=.02)
    plt.savefig('clusterContours.png')
Example #57
0
 def multivariateGaussian(dataset, mu, sigma):
     p = multivariate_normal(mean=mu, cov=sigma)
     return p.pdf(dataset)
Example #58
0
def generate_gaussian_response(shape, cov):
    mvn = multivariate_normal(mean=np.zeros(2), cov=cov)
    grid = build_grid(shape)
    return mvn.pdf(grid)[None]
Example #59
0
 def d_init_prop(self, x,y):
     return stats.multivariate_normal(self.filter_mu, self.filter_sigma).pdf(x)
Example #60
0
File: emb.py Project: jzbjyb/RelEnt
def sim_func(child_emb,
             parent_emb,
             method='cosine',
             sigma=1.0,
             kde_c=None,
             kde_p=None,
             kde_c_prob=None,
             kde_p_prob=None):
    if method == 'cosine':
        return np.mean(cosine_similarity(child_emb, parent_emb))

    if method == 'euc':
        return -np.mean(euclidean_distances(child_emb, parent_emb))

    if method == 'expeuc':
        dist = np.expand_dims(child_emb, 1) - np.expand_dims(parent_emb, 0)
        dist = np.sum(dist * dist, -1)
        dist_min = np.min(dist, -1)
        sim = np.exp(-dist + np.expand_dims(dist_min, -1))
        sim = np.mean(np.log(np.mean(sim, -1)) - dist_min)
        return sim

    if method == 'mixgau':
        emb_dim = child_emb.shape[1]
        num_occ_child = child_emb.shape[0]
        num_occ_parent = parent_emb.shape[0]
        rv = multivariate_normal(np.zeros(emb_dim),
                                 np.diag(np.ones(emb_dim)) * sigma)
        dist = np.expand_dims(child_emb, 1) - np.expand_dims(parent_emb, 0)
        sim = rv.pdf(dist)
        sim = np.sum(np.log(np.sum(sim / num_occ_parent, axis=-1)))
        return sim

    if method == 'mixgau_fast':
        emb_dim = child_emb.shape[1]
        num_occ_child = child_emb.shape[0]
        num_occ_parent = parent_emb.shape[0]
        var = np.array([sigma] * emb_dim)
        # SHAPE: (num_occ_child, num_occ_parent, emb_dim)
        dist = np.expand_dims(child_emb, 1) - np.expand_dims(parent_emb, 0)
        dist = np.reshape(dist, (-1, emb_dim))
        denominator = -0.5 * (emb_dim *
                              (np.log(2 * np.pi)) + np.sum(np.log(var)))
        numerator = -0.5 * np.sum(dist**2 / np.expand_dims(var, 0), -1)
        log_prob = np.reshape(denominator + numerator,
                              (num_occ_child, num_occ_parent))
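        # log-sum-exp: subtract the per-row max before exponentiating to avoid
        # underflow, then add it back after taking the log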
        log_prob_max = np.max(log_prob, -1, keepdims=True)
        sim = np.sum(
            np.log(
                np.sum(np.exp(log_prob - log_prob_max) / num_occ_parent,
                       axis=-1)) + log_prob_max)
        return sim

    if method == 'avg':
        child_emb = np.mean(child_emb, 0)
        parent_emb = np.mean(parent_emb, 0)
        return np.mean(cosine_similarity([child_emb], [parent_emb]))

    if method == 'avg_euc':
        child_emb = np.mean(child_emb, 0)
        parent_emb = np.mean(parent_emb, 0)
        return -np.mean(euclidean_distances([child_emb], [parent_emb]))

    if method == 'avg_dot':
        child_emb = np.mean(child_emb, 0)
        parent_emb = np.mean(parent_emb, 0)
        sim = (child_emb * parent_emb).sum()
        return sim

    if method == 'kde':
        if kde_c is None:
            kde_c = KernelDensity(kernel='gaussian',
                                  bandwidth=sigma).fit(child_emb)
        if kde_p is None:
            kde_p = KernelDensity(kernel='gaussian',
                                  bandwidth=sigma).fit(parent_emb)
        if kde_c_prob is None:
            kde_c_prob = kde_c.score_samples(child_emb)
        parent_prob = kde_p.score_samples(child_emb)
        kl = np.sum(np.exp(kde_c_prob) * (kde_c_prob - parent_prob))
        return -kl

    raise NotImplementedError