Code example #1
def qEI(sigma, eps, mu, plugin, q):
    # hardcoded lower integration limit for the multivariate normal CDF
    # (a finite stand-in for -inf in mvn.mvnun)
    # TODO: note that here we explicitly assume the data is 2-d
    low = np.full(q, -20.0)

    pk = np.zeros((q, ))
    first_term = np.zeros((q, ))
    second_term = np.zeros((q, ))
    for k in range(q):
        Sigma_k = covZk(sigma, k)

        mu_k = mu - mu[k]
        mu_k[k] = -mu[k]
        b_k = np.zeros((q, ))
        b_k[k] = -plugin

        # expected to be equivalent to the pmnorm function of the original R package
        zeros = np.zeros(q)
        p, _ = mvn.mvnun(low, b_k - mu_k, zeros, Sigma_k)
        pk[k] = p
        first_term[k] = (mu[k] - plugin) * pk[k]

        upper_temp = b_k + eps * Sigma_k[:, k] - mu_k
        p1, _ = mvn.mvnun(low, upper_temp, zeros, Sigma_k)
        second_term[k] = 1 / eps * (p1 - pk[k])
    expectedImprov = np.sum(first_term) + np.sum(second_term)
    return expectedImprov
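The covZk helper is not shown in this snippet. A plausible reconstruction (an assumption on our part, not the original code) follows the affine transform used in analytical q-EI formulas, such as in the R package the comment above references, and matches how mu_k is built in the loop: row j maps Z to Z_j - Z_k, and row k maps Z to -Z_k.

import numpy as np

def covZk(sigma, k):
    q = sigma.shape[0]
    A = np.eye(q)
    A[:, k] -= 1.0   # rows j != k: Z_j - Z_k
    A[k, k] = -1.0   # row k: -Z_k
    return A @ sigma @ A.T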
Code example #2
File: Success.py Project: phonchi/SCA-Algorithms
    def verify(self):
        """ Verify our results against Yunsi Fei's results where she publishes her signal to noise ration and success rate curves
        """
        x = []
        yDPA = []
        yCPA = []

        # Amended Formula which works
        for i in range(1, 200, 20):
            cov, mean = self.conf.YCovMeanDPA(0.0016, 0.0046, i, correctKey=60)
            yDPA.append(
                mvn.mvnun(DESSuccess.lower, DESSuccess.upper, mean.reshape(63),
                          cov)[0])
            x.append(i)
            cov, mean = self.conf.YCovMeanCPA2(0.0016,
                                               0.0048,
                                               i,
                                               correctKey=60)
            yCPA.append(
                mvn.mvnun(self.lower, self.upper, mean.reshape(63), cov)[0])

        p1 = plot(xest, ySR1est, '--')
        p2 = plot(x, yCPA)
        legend(loc='lower right')
        ylabel('Success Rate')
        xlabel('Number of Measurements')
        show()
Code example #3
File: testr.py Project: Azizimj/GTSPbnd
def cal_c():
    from scipy.stats import mvn
    import numpy as np
    low = np.array([0, 0])
    upp = np.array([1, 1])
    mu = np.array([.5, .5])
    S = np.array([[1, 0], [0, 1]])
    p, i = mvn.mvnun(low, upp, mu, S)
    print(p, i)

    low = np.array([0, 0])
    upp = np.array([1, 1])
    mu = np.array([.5, .5])
    S = np.array([[.1, 0], [0, 0.1]])
    p, i = mvn.mvnun(low, upp, mu, S)
    print(p, i)

    low = np.array([-.5, -.5])
    upp = np.array([.5, .5])
    mu = np.array([0, 0])
    S = np.array([[1, 0], [0, 1]])
    p, i = mvn.mvnun(low, upp, mu, S)
    print(p, i)

    return p
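As a sanity check on the calls above (a minimal sketch): with a diagonal covariance the components are independent, so the box probability factorizes into 1-D interval probabilities.

import numpy as np
from scipy.stats import norm, mvn

low = np.array([0.0, 0.0])
upp = np.array([1.0, 1.0])
mu = np.array([.5, .5])
S = np.array([[.1, 0], [0, 0.1]])

p_mvn, _ = mvn.mvnun(low, upp, mu, S)
sd = np.sqrt(np.diag(S))
p_ref = np.prod(norm.cdf(upp, mu, sd) - norm.cdf(low, mu, sd))
assert np.isclose(p_mvn, p_ref, atol=1e-6)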
Code example #4
def EIBV_2D(Threshold_T, Threshold_S, mu, Sig, F, R):
    '''
    :param Threshold_T: temperature threshold
    :param Threshold_S: salinity threshold
    :param mu: mean vector (temperature and salinity values interleaved)
    :param Sig: covariance matrix
    :param F: sampling matrix
    :param R: noise matrix
    :return: EIBV evaluated at every point
    '''
    # Update the field variance
    a = np.dot(Sig, F.T)
    b = np.dot(np.dot(F, Sig), F.T) + R
    c = np.dot(F, Sig)
    Sigxi = np.dot(a, np.linalg.solve(b, c))  # new covariance matrix
    V = Sig - Sigxi  # Uncertainty reduction # updated covariance

    IntA = 0.0
    N = mu.shape[0]
    # integrate out all elements in the bernoulli variance term
    for i in np.arange(0, N, 2):

        # extract the corresponding variance reduction term
        SigMxi = Sigxi[np.ix_([i, i + 1], [i, i + 1])]

        # extract the corresponding mean terms
        Mxi = [mu[i], mu[i + 1]]  # temp and salinity

        sn2 = V[np.ix_([i, i + 1], [i, i + 1])]
        # vv2 = np.add(sn2, SigMxi) # was originally used to make it obscure
        vv2 = Sig[np.ix_([i, i + 1], [i, i + 1])]

        # compute the first part of the integration
        Thres = np.vstack((Threshold_T, Threshold_S))
        mur = np.subtract(Thres, Mxi)
        IntB_a = mvn.mvnun(np.array([[-np.inf], [-np.inf]]), np.zeros([2, 1]),
                           mur, vv2)[0]

        # compute the second part of the integration, which is squared
        mm = np.vstack((Mxi, Mxi))
        # SS = np.array([[vv2, SigMxi], [SigMxi, vv2]]) # thought of it as a simpler version
        SS = np.add(
            np.vstack((np.hstack((sn2, np.zeros(
                (2, 2)))), np.hstack((np.zeros((2, 2)), sn2)))),
            np.vstack((np.hstack((SigMxi, SigMxi)), np.hstack(
                (SigMxi, SigMxi)))))
        Thres = np.vstack((Threshold_T, Threshold_S, Threshold_T, Threshold_S))
        mur = np.subtract(Thres, mm)
        IntB_b = mvn.mvnun(
            np.array([[-np.inf], [-np.inf], [-np.inf], [-np.inf]]),
            np.zeros([4, 1]), mur, SS)[0]

        # compute the total integration
        IntA = IntA + np.nansum([IntB_a, -IntB_b])

    return IntA
Code example #5
 def EIBV_1D(self, threshold, mu, Sig, F, R):
     Sigxi = Sig @ F.T @ np.linalg.solve(F @ Sig @ F.T + R, F @ Sig)
     V = Sig - Sigxi
     sa2 = np.diag(V).reshape(
         -1, 1)  # the corresponding variance term for each location
     IntA = 0.0
     for i in range(len(mu)):
         sn2 = sa2[i]
         m = mu[i]
         # Bernoulli variance p * (1 - p) of the excursion indicator
         p = mvn.mvnun(-np.inf, threshold, m, sn2)[0]
         IntA = IntA + p - p**2
     return IntA
Code example #6
def marginal_desviaciones2(x1, x2, p, rrho, factor, qq1, qq2, gg1, gg2, maxpts1, abseps1):
    # finite lower limit standing in for -inf
    # test = -np.inf
    test = -30
    low = np.array([test, test])
    rhoi = qq1*(qq2*rrho)
    Sigma1 = np.array([[1, rhoi], [rhoi, 1]])
    #MX = np.asmatrix(np.dot(np.transpose(x), x))
    q1XB1 = qq1*x1
    q2XB2 = qq2*x2
    upp = np.array([q1XB1, q2XB2])
    mu = np.array([0, 0])
    phi2, inform = mvn.mvnun(low, upp, mu, Sigma1)
    #phi2 = mvstdnormcdf(low, upp, Sigma1, maxpts1, abseps1, abseps1)
    if phi2 <= abseps1:
        phi2 = abseps1  # floor the CDF value to avoid division by ~0 below
    mphi2 = normpdf(upp, mu, Sigma1, 2)
    pedazo1 = ((q1XB1*gg1)/phi2) - ((rhoi*mphi2)/phi2) - ((gg1**2)/(phi2**2))
    pedazo2 = ((q2XB2*gg2)/phi2) - ((rhoi*mphi2)/phi2) - ((gg2**2)/(phi2**2))
    pedazo3 = (mphi2/phi2) - ((gg1*gg2)/(phi2**2))
    pedazo4 = qq2*((mphi2/phi2)*(rhoi*(factor*(factor*(q2XB2 - rhoi*q1XB1))) - q1XB1 - (gg1/phi2)))
    pedazo5 = qq1*((mphi2/phi2)*(rhoi*(factor*(factor*(q1XB1 - rhoi*q2XB2))) - q2XB2 - (gg2/phi2)))
    pedazo6 = (mphi2/phi2)*(((factor**2)*rhoi)*(1 - (factor**2)*(((x1**2) + (x2**2)) - 2*rhoi*(q1XB1*q2XB2))) + ((factor*(q1XB1*q2XB2)) - (mphi2/phi2)))
    return pedazo1, pedazo2, pedazo3, pedazo4, pedazo5, pedazo6
Code example #7
File: copulacdf.py Project: xiyueyiwan/copula-py
def _gaussian(u, rho):
    """ Generates values of the Gaussian copula
    
    Inputs:
    u -- u is an N-by-P matrix of values in [0,1], representing N
         points in the P-dimensional unit hypercube.  
    rho -- a P-by-P correlation matrix.
    
    Outputs:
    y -- the value of the Gaussian Copula
    """
    n = u.shape[0]
    p = u.shape[1]
    lo = np.full((1, p), -10)
    hi = norm.ppf(u)

    mu = np.zeros(p)

    # norm.ppf(q, loc=0, scale=1) replaces MATLAB's norminv, and
    # mvn.mvnun replaces mvncdf; the upper bound is the output of the ppf call
    y = np.zeros(n)
    # mvnun does not appear to be vectorized, so process the rows one at a time
    for ii in np.arange(n):
        # TODO: error checking -- any -inf or inf values in hi would need
        # explicit handling here
        p, i = mvn.mvnun(lo, hi[ii, :], mu, rho)
        y[ii] = p

    return y
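A minimal driver for the helper above (a sketch): evaluate the bivariate Gaussian copula at a few points of the unit square.

import numpy as np
from scipy.stats import norm, mvn  # both used inside _gaussian

u = np.array([[0.3, 0.7],
              [0.5, 0.5],
              [0.9, 0.1]])
rho = np.array([[1.0, 0.6],
                [0.6, 1.0]])
print(_gaussian(u, rho))  # one copula value per row of u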
Code example #8
	def estimatePartitionFunction(K, mean, cov):
		# https://github.com/scipy/scipy/blob/v1.2.1/scipy/stats/_multivariate.py
		# line 525, def _cdf
		assert cov.shape[1:] == (K, K)
		M = len(cov)
		assert mean.shape[1:] == (M, K)
		N = len(mean)

		arr = np.empty([N, M], dtype=float)
		for a, m, c in zip(arr.T, mean.transpose(1, 0, 2), cov):
			a[:] = [
				mvn.mvnun(
					lower=-x, upper=np.full(K, np.inf), means=np.zeros(K), covar=c,
					maxpts=1000000 * K,
					abseps=1e-6,
					releps=1e-6,
				)[0]
				for x in m
			]
		# [mvn.mvnun(lower=-m, upper=np.full(K, np.inf), means=np.zeros(K), covar=cov)[0] for m in mean]
		# [mvn.mvnun(lower=np.zeros(K), upper=np.full(K, np.inf), means=m, covar=cov)[0] for m in mean]
		# [mvn.mvnun(lower=np.full(K, -np.inf), upper=m, means=np.zeros(K), covar=cov)[0] for m in mean]
		# multivariate_normal(mean=None, cov=cov).cdf(mean)
		# multivariate_normal(mean=np.zeros(K), cov=cov).cdf(mean)
		# [multivariate_normal(mean=-m, cov=cov).cdf(np.zeros(K)) for m in mean]

		arr = torch.tensor(arr, dtype=dtype, device=device)
		return arr
Code example #9
File: mykde.py Project: dlaw/psf
    def integrate_box(self, low_bounds, high_bounds, maxpts=None):
        """Computes the integral of a pdf over a rectangular interval.

        Parameters
        ----------
        low_bounds : vector
            lower bounds of integration
        high_bounds : vector
            upper bounds of integration
        maxpts=None : int
            maximum number of points to use for integration

        Returns
        -------
        value : scalar
            the result of the integral
        """
        if maxpts is not None:
            extra_kwds = {'maxpts': maxpts}
        else:
            extra_kwds = {}

        value, inform = mvn.mvnun(low_bounds, high_bounds, self.dataset,
            self.covariance, **extra_kwds)
        if inform:
            msg = ('an integral in mvn.mvnun requires more points than %s' %
                (self.d*1000))
            warnings.warn(msg)

        return value
Code example #10
def PIBV(loc_pilot, mu_upd, Q_upd, Mvar):

    IntA = 0.0
    L = cholesky(Q_upd)
    for ind in loc_pilot:  # loop through 53 pilot points
        mvar = Mvar[ind]
        mu = mu_upd[ind]
        e = np.zeros([N, 1])  # unit vector to find sigma
        e[ind] = True
        Sigma = L.solve_A(e)
        Cov = np.array([mvar, mvar - Sigma[ind], mvar - Sigma[ind],
                        mvar]).reshape(2, 2)
        IntA = IntA + mvn.mvnun(-np.inf, Thres, mu, mvar)[0] - \
               mvn.mvnun(np.array([[-np.inf], [-np.inf]]), np.ones([2, 1]) * Thres, np.ones([2, 1]) * mu, Cov)[0]

    return IntA
Code example #11
    def integrate_box(self, low_bounds, high_bounds, maxpts=None):
        """Computes the integral of a pdf over a rectangular interval.

        Parameters
        ----------
        low_bounds : vector
            lower bounds of integration
        high_bounds : vector
            upper bounds of integration
        maxpts=None : int
            maximum number of points to use for integration

        Returns
        -------
        value : scalar
            the result of the integral
        """
        if maxpts is not None:
            extra_kwds = {'maxpts': maxpts}
        else:
            extra_kwds = {}

        value, inform = mvn.mvnun(low_bounds, high_bounds, self.dataset,
                                  self.covariance, **extra_kwds)
        if inform:
            msg = ('an integral in mvn.mvnun requires more points than %s' %
                   (self.d * 1000))
            warnings.warn(msg)

        return value
Code example #12
    def integrate_box(self, low_bounds, high_bounds, maxpts=None):
        """Computes the integral of a pdf over a rectangular interval.

        Parameters
        ----------
        low_bounds : array_like
            A 1-D array containing the lower bounds of integration.
        high_bounds : array_like
            A 1-D array containing the upper bounds of integration.
        maxpts : int, optional
            The maximum number of points to use for integration.

        Returns
        -------
        value : scalar
            The result of the integral.

        """
        if maxpts is not None:
            extra_kwds = {'maxpts': maxpts}
        else:
            extra_kwds = {}

        value, inform = mvn.mvnun(low_bounds, high_bounds, self.dataset,
                                  self.covariance, **extra_kwds)
        if inform:
            msg = ('An integral in mvn.mvnun requires more points than %s' %
                   (self.d * 1000))
            warnings.warn(msg)

        return value
Code example #13
def GaussianUniformIntegral(x_range,
                            y_range,
                            x_range_shift,
                            y_range_shift,
                            cov,
                            num_x_grid=3,
                            num_y_grid=3):
    '''
    Exact equation:
        1 / (size of x_range) * 1 / (size of y_range)
        * integral_{box of x_range, y_range} dx dy
          integral_{box of x_range_shift, y_range_shift} dxshift dyshift
          N(xshift - x, yshift - y | cov)
    Approximation: replace the uniform distribution by a grid over x_range and y_range:
        1 / (number of x grids) * 1 / (number of y grids)
        * sum_{x grids} sum_{y grids}
          integral dxshift dyshift N(xgrid - xshift, ygrid - yshift | cov)
    '''
    value = 0
    size_x_grid = (x_range[1] - x_range[0]) / (num_x_grid + 1)
    size_y_grid = (y_range[1] - y_range[0]) / (num_y_grid + 1)
    for xgrid in np.arange(x_range[0] + size_x_grid, x_range[1], size_x_grid):
        for ygrid in np.arange(y_range[0] + size_y_grid, y_range[1],
                               size_y_grid):
            prob, _ = mvn.mvnun(
                np.array([x_range_shift[0] - xgrid, y_range_shift[0] - ygrid]),
                np.array([x_range_shift[1] - xgrid, y_range_shift[1] - ygrid]),
                np.zeros(2), cov)
            value += prob
    value /= (num_x_grid * num_y_grid)
    return value
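A usage sketch with assumed ranges: the average probability that a point uniform over the unit box, displaced by an isotropic Gaussian, lands in the shifted box.

import numpy as np
from scipy.stats import mvn

p = GaussianUniformIntegral(x_range=(0.0, 1.0), y_range=(0.0, 1.0),
                            x_range_shift=(0.5, 1.5), y_range_shift=(0.5, 1.5),
                            cov=0.25 * np.eye(2))
print(p)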
Code example #14
    def _cdf(self, x, mean, cov, maxpts, abseps, releps):
        """
        Parameters
        ----------
        x : ndarray
            Points at which to evaluate the cumulative distribution function.
        mean : ndarray
            Mean of the distribution
        cov : array_like
            Covariance matrix of the distribution
        maxpts: integer
            The maximum number of points to use for integration
        abseps: float
            Absolute error tolerance
        releps: float
            Relative error tolerance

        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'cdf' instead.

        .. versionadded:: 1.0.0

        """
        lower = np.full(mean.shape, -np.inf)
        # mvnun expects 1-d arguments, so process points sequentially
        func1d = lambda x_slice: mvn.mvnun(lower, x_slice, mean, cov, maxpts,
                                           abseps, releps)[0]
        out = np.apply_along_axis(func1d, -1, x)
        return _squeeze_output(out)
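This appears to be scipy's internal _cdf helper (its docstring says to call cdf instead); the public entry point is multivariate_normal(...).cdf. A minimal usage sketch:

import numpy as np
from scipy.stats import multivariate_normal

cov = np.array([[1.0, 0.3], [0.3, 1.0]])
rv = multivariate_normal(mean=np.zeros(2), cov=cov)
print(rv.cdf(np.zeros(2)))  # P(X1 <= 0, X2 <= 0) for the correlated pair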
Code example #15
File: f_particle.py Project: SiChiTong/iarc-2017
 def p(self):
     # TODO : hard-coded error margin
     cov = self.ukf.P[:3, :3]  # ignore velocity components ( for now )
     # error margin, +- 0.5m, 30 deg.
     d = np.asarray([0.5, 0.5, np.deg2rad(30)])
     _p, _ = mvn.mvnun(-d, d, np.zeros_like(d), cov)
     return _p
Code example #16
File: copulacdf.py Project: lessc0de/copula-py
def _gaussian(u, rho):
    """ Generates values of the Gaussian copula
    
    Inputs:
    u -- u is an N-by-P matrix of values in [0,1], representing N
         points in the P-dimensional unit hypercube.  
    rho -- a P-by-P correlation matrix.
    
    Outputs:
    y -- the value of the Gaussian Copula
    """
    n  = u.shape[0]
    p  = u.shape[1]
    lo = np.full((1,p), -10)
    hi = norm.ppf(u)
    
    mu = np.zeros(p)
    
    # norm.ppf(q, loc=0, scale=1) replaces MATLAB's norminv, and
    # mvn.mvnun replaces mvncdf; the upper bound is the output of the ppf call
    y = np.zeros(n)
    # mvnun does not appear to be vectorized, so process the rows one at a time
    for ii in np.arange(n):
        # TODO: error checking -- any -inf or inf values in hi would need
        # explicit handling here
        p,i = mvn.mvnun(lo, hi[ii,:], mu, rho)
        y[ii] = p
    
    return y
Code example #17
 def computeProbability(self, x, G, g):
     # compute probability of violating the constraints
     self.m_in = G.shape[0]
     self.Sigma_eG = np.dot(np.dot(G, self.Sigma), G.transpose())
     low = np.array(-100 * np.sqrt(np.diag(self.Sigma_eG)))
     ineq_margins = np.dot(G, x) + g
     p, i = mvn.mvnun(low, ineq_margins, np.zeros(self.m_in), self.Sigma_eG)
     return p
Code example #18
def M_MVN(a,b,c):
    from scipy.stats import mvn
    mean = np.array([0,0])
    Sigma = np.array([[1,c],[c,1]])
    lower=np.array([-10000,-10000])
    upper=np.array([a,b])
    p,i=mvn.mvnun(lower,upper,mean,Sigma)    
    return p
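A quick check (a sketch, assuming numpy is imported as np at module level, as the snippet implies): at c = 0 the two components are independent, so the CDF factorizes into two 1-D standard normal CDFs.

import numpy as np
from scipy.stats import norm

a, b = 0.5, -0.2
assert np.isclose(M_MVN(a, b, 0.0), norm.cdf(a) * norm.cdf(b), atol=1e-6)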
Code example #19
def getDefaultCorAB(A, B, muA, muB, sigmaA, sigmaB, rhoAB, dt, KA, KB):
    dA = getDelta(muA, sigmaA, dt, A, KA)
    dB = getDelta(muB, sigmaB, dt, B, KB)
    pA = norm.cdf(dA)
    pB = norm.cdf(dB)
    pAB, err = mvn.mvnun(np.array([-100, -100]), np.array([dA, dB]),
                         np.array([0, 0]), np.array([[1, rhoAB], [rhoAB, 1]]))
    return np.divide(pAB - pA * pB, np.sqrt(pA * pB * (1 - pA) * (1 - pB)))
Code example #20
	def subproc(self,args):

		sub_mu = args[0]
		sub_cov = args[1]
		sub_var = np.diag(sub_cov)
		fstar = args[2]

		grid_num = 100
		J = np.shape(sub_var)[0]-1

		##### compute 2nd term from here #####
		upper = np.min(np.array([sub_mu + 5*np.sqrt(sub_var), fstar*np.ones(J+1)]),axis=0)
		lower = np.min(np.array([sub_mu - 5*np.sqrt(sub_var), fstar*np.ones(J+1)]),axis=0)

		Z = mvn.mvnun(lower,upper,sub_mu,sub_cov)[0]

		if Z > 0:
			if J < 3:
				##### compute conditional Entropy by Division quadrature #####
				grid = []
				for j in range(J+1):
					jth_space = np.unique(np.linspace(lower[j],upper[j],grid_num))[:,np.newaxis]
					grid.append(jth_space)

				grid = yz.gen_mesh(grid)
				H = np.array(multivariate_normal.pdf(grid,mean=sub_mu, cov=sub_cov)) # compute pdf

				H = H[H >= 1e-9]

				hyper_volume = np.prod(upper-lower)
				ce = ((1/Z)*(hyper_volume/(grid_num**(J+1)))*np.sum(-H*np.log(H)) + np.log(Z)) # integrate

			else:
				##### compute conditional Entropy by Monte Carlo simulation #####
				sampleN = 0
				samples = np.atleast_2d(sub_mu)

				while 1:
					s = np.random.multivariate_normal(sub_mu,sub_cov,1000)
					sampleN += 1000
					##### check if samples is in the truncated area #####
					for j in range(J+1):
						in_area = (lower[j] <= s[:,j]) & (s[:,j] <= upper[j])
						s = s[in_area,:]
					samples = np.r_[samples,s]

					# keep sampling until enough truncated draws are collected
					if np.shape(samples)[0] >= 50000:
						break

				H = multivariate_normal.pdf(samples,mean=sub_mu, cov=sub_cov)
				H = H[H >= 1e-9]

				ce = (1/Z)*((1/sampleN)*np.sum(-np.log(H))) + np.log(Z) # integrate
		else:
			# degenerate truncation region (zero probability mass): define the
			# conditional entropy contribution as zero
			ce = 0.0

		return ce
Code example #21
File: Success.py Project: phonchi/SCA-Algorithms
 def TheoreticalCPA(self, numTraces):
     """
     Returns the DES, CPA Success Rate (calculated using Yunsi Fei's Confusion Analysis formula) for the indicated number of traces
     """
     cov, mean = self.conf.YCovMeanCPA2(self.signal,
                                        self.noise,
                                        numTraces,
                                        correctKey=self.correctSubKey)
     return mvn.mvnun(self.lower, self.upper, mean.reshape(63), cov)[0]
Code example #22
    def bboxes2gt_normal(classes,
                         input_shape,
                         bboxes,
                         grid_division,
                         std_ratio=(6, 6)):
        """This method

        :param list classes: list of strings, the names of the classes
        :param tuple input_shape: shape of the image
        :param np.ndarray bboxes: list of bboxes in the format class_id, xmin, ymin, xmax, ymax
        :param grid_division: division in cells of the image (x_axis, y_axis)
        :param std_ratio: tuple to compute std along the 2 axes, it is calculated as side_length/std_ratio

        :return: array with the count per class per cell
        :rtype: np.ndarray
        """

        grids = GTGen.get_grids(input_shape, grid_division)
        counter = np.zeros((grids.shape[0], len(classes)))

        for b in range(len(bboxes)):
            bbox = bboxes[b]
            # Define mu and covariance matrix
            mu = np.array([(bbox[3] - bbox[1]) / 2 + bbox[1],
                           (bbox[4] - bbox[2]) / 2 + bbox[2]])
            S = np.array([[((bbox[3] - bbox[1]) / std_ratio[0])**2, 0],
                          [0, ((bbox[4] - bbox[2]) / std_ratio[1])**2]])
            norm_coeff = mvn.mvnun(np.array(bbox[1:3]), np.array(bbox[3:]), mu,
                                   S)[0]

            if bbox[1] > input_shape[1] or bbox[3] > input_shape[1] or bbox[2] > input_shape[0] or bbox[4] > \
                    input_shape[0]:
                raise ValueError(
                    "Bounding Boxes coordinates exceeds the image boundaries")
            if np.any(bbox < 0):
                raise ValueError("Found negative Bounding Boxes coordinates")
            for i in range(grids.shape[0]):
                low, upp = GTUtils.coordinates_intersection(grids[i], bbox[1:])
                if low is None:
                    continue
                counter[i, int(bbox[0])] += mvn.mvnun(low, upp, mu,
                                                      S)[0] / norm_coeff

        return counter
Code example #23
def gaussian_testRectangle():
    """
    Test integral of bvn over polygon
    Test case:
        zero mean and unit covariance 2D Gaussian
        area with [xlim, ylim] is rectangle boundary from 0 to 1
    Methods used:
        scipy.stats.mvn.mvnun: closed-form solution for a rectangular bound
        scipy.integrate.dblquad: double integral
        polyIntegratePdf: polygon triangulation method
    """
    from scipy.stats import mvn
    import scipy.integrate as integrate
    import time
    # set values
    mean = np.array([0, 0])
    cov = np.eye(2)

    # mvnun
    start_mvnun = time.time()
    lower = np.array([0, 0])
    upper = np.array([1, 1])

    area_mvnun, _ = mvn.mvnun(lower, upper, mean, cov)
    time_mvnun = time.time() - start_mvnun

    # dblquad
    def pdfDBL(x, y, mean, cov):
        x1 = x - mean[0]
        x2 = y - mean[1]
        detC = cov[0, 0] * cov[1, 1] - cov[0, 1] * cov[1, 0]
        e = (cov[1, 1] * (x1**2) +
             (-cov[0, 1] - cov[1, 0]) * x1 * x2 + cov[0, 0] * (x2**2)) / detC
        return 1 / (2 * np.pi * np.sqrt(abs(detC))) * np.exp(-0.5 * e)

    start_dblquad = time.time()
    area_dblquad = integrate.dblquad(lambda x, y: pdfDBL(x, y, mean, cov),
                                     lower[0], upper[0], lambda x: lower[1],
                                     lambda x: upper[1])[0]
    time_dblquad = time.time() - start_dblquad

    # polygon triangulation method
    start_poly = time.time()
    poly = np.array([[lower[0], lower[1]], [upper[0], lower[1]],
                     [upper[0], upper[1]], [lower[0], upper[1]]])
    area_poly = polyIntegratePdf(poly, mean, cov, eps=1.0e-5)
    time_poly = time.time() - start_poly

    # print output
    print("Test integral over rectangle bound :")
    print("Scipy mvnun: {:5.6f}".format(area_mvnun),
          " | time: {:6.9f} s".format(time_mvnun))
    print("Scipy dblquad: {:5.6f}".format(area_dblquad),
          " | time: {:6.9f} s".format(time_dblquad))
    print("Polygon integral: {:5.6f}".format(area_poly),
          " | time: {:6.9f} s".format(time_poly),
          " \nerror: {:5.8f}".format(abs(area_mvnun - area_poly)))
Code example #24
File: f_particle.py Project: SiChiTong/iarc-2017
    def match(self, p2):
        # TODO : incorporate velocities
        # TODO : consider color[r/g] and type[t/o], when provided
        p1 = self.as_vec()
        p2 = p2.as_vec()
        d = np.abs(ukf_residual(p1, p2))
        cov = np.diag(self.SIGMAS[:3])
        # assume independent x-y-t
        p, _ = mvn.mvnun(d, [20, 20, np.pi], np.zeros_like(d), cov)

        return p * (2**3)  # account for quartiles
Code example #25
File: truncMVN.py Project: zachjennings/truncMVN
 def normalize(self,mean=None,cov=None,low=None,high=None):
     """
     Calculate normalization term for the truncated MVN.
     
     Involves calculation of four CDF terms:
     P(4) - P(3) - P(2) + P(1)
     """
     #calculate CDF of full region
     cdf_4,i = mvn.mvnun(self.low_int_limit,high,mean,cov)
     
     #calculate CDFs of outside regions
     cdf_3,i = mvn.mvnun(self.low_int_limit,np.array([high[0],low[1]]),mean,cov)
     cdf_2,i = mvn.mvnun(self.low_int_limit,np.array([low[0],high[1]]),mean,cov)
     
     #calculate CDF of lower-left corner region
     cdf_1,i = mvn.mvnun(self.low_int_limit,low,mean,cov)
     
     reg_prob = (cdf_4 - cdf_3 - cdf_2 + cdf_1)
             
     return 1./reg_prob
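The four-term sum above is 2-D inclusion-exclusion. A minimal standalone check of the same identity against a direct box integral (a sketch with a standard bivariate normal and a large finite proxy for -inf):

import numpy as np
from scipy.stats import mvn

mean = np.zeros(2)
cov = np.eye(2)
low_int = np.full(2, -10.0)   # proxy for -inf
low, high = np.array([-0.5, -0.5]), np.array([0.5, 0.5])

cdf4 = mvn.mvnun(low_int, high, mean, cov)[0]
cdf3 = mvn.mvnun(low_int, np.array([high[0], low[1]]), mean, cov)[0]
cdf2 = mvn.mvnun(low_int, np.array([low[0], high[1]]), mean, cov)[0]
cdf1 = mvn.mvnun(low_int, low, mean, cov)[0]
box = mvn.mvnun(low, high, mean, cov)[0]
assert np.isclose(cdf4 - cdf3 - cdf2 + cdf1, box, atol=1e-6)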
Code example #26
def compute_univariate_marginal(x, k, sigma, a, b):
    """Compute the univariate marginal of a normally distributed dataset.
    
    Args:
        x (1D numpy array): The k-th dimension of the dataset.
        sigma (2D numpy array): The covariance matrix of the density function.
        k (int): The index of the target dimension.
        a (1D numpy array): The truncation lower bound.
        b (1D numpy array): The truncation upper bound.
        
    Returns:
        Fk (1D numpy array): The calculated first moment.
    """
    # Basic initialization
    N = x.shape[0]
    D = sigma.shape[0]
    Fk = np.zeros([N]) 

    # CASE 1: If x = inf or -inf, then Fk = 0
    # Find infinity elements
    idx = ~np.isinf(x) 
    if np.sum(~idx):
        Fk[~idx] = 0

    if (np.sum(idx) == 0):
        return Fk

    # CASE 2: Check if the dataset is univariate (D = 1)
    if D == 1:
        Fk[idx] = norm.pdf(x[idx], 0, np.sqrt(float(sigma)))
        return Fk

    # CASE 3: Consider the case of multivariate dataset (D > 1)    
    o = np.zeros([D], dtype=bool)
    o[k] = True
    m = ~o

    # Get the mean and covariance excluding the k-th dimension
    cmu = (sigma[m, :][:, o] / sigma[o, :][:, o] * x[idx]).T
    csig = sigma[m, :][:, m] - sigma[m, :][:, o] / sigma[o, :][:, o] *\
        sigma[o, :][:, m]

    # Estimate the value of cumulative distribution function for each dimension
    cdf = np.zeros([a[idx, :].shape[0], 1])
    for i in range(a[idx, :].shape[0]):
        cdf[i] = mvn.mvnun((a[idx, :][:, m] - cmu)[i], 
              (b[idx, :][:, m] - cmu)[i], 
              np.zeros([D-1]), np.squeeze(csig))[0]
    
    # Calculate the first moment for each dimension
    Fk[idx] = (norm.pdf(x[idx], 0, np.squeeze(np.sqrt(sigma[o, :][:, o]))).\
        reshape([-1, 1]) * cdf).reshape([-1])

    return Fk
Code example #27
    def computeIndividualProbabilities2(self, x, G, g):
        self.m_in = G.shape[0]
        R = np.zeros(self.m_in)
        for i in range(self.m_in):
            Sigma_eG = np.dot(np.dot(G[i, :], self.Sigma), G[i, :].transpose())
            low = np.array([-100 * np.sqrt(Sigma_eG)])
            ineq_margins = np.dot(G[i, :], x) + g[i]
            p, dummy = mvn.mvnun(low, ineq_margins, 0.0, Sigma_eG)
            R[i] = p
#            print "Ineq %d, p(%f<x<%f) = %.2f,\t sigma %f" % (i, low[0], ineq_margins, p, Sigma_eG);
        return R
Code example #28
def EIBV_1D(threshold, mu, Sig, F, R):
    '''
    :param threshold:
    :param mu:
    :param Sig:
    :param F: sampling matrix
    :param R: noise matrix
    :return: EIBV evaluated at every point
    '''
    Sigxi = Sig @ F.T @ np.linalg.solve(F @ Sig @ F.T + R, F @ Sig)
    V = Sig - Sigxi
    sa2 = np.diag(V).reshape(
        -1, 1)  # the corresponding variance term for each location
    IntA = 0.0
    for i in range(len(mu)):
        sn2 = sa2[i]
        m = mu[i]
        # Bernoulli variance p * (1 - p) of the excursion indicator
        p = mvn.mvnun(-np.inf, threshold, m, sn2)[0]
        IntA = IntA + p - p**2
    return IntA
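A minimal driver for EIBV_1D (a sketch; the shapes and values of mu, Sig, F, and R below are our assumptions, not from the original project):

import numpy as np
from scipy.stats import mvn

n = 5
mu = np.linspace(-1.0, 1.0, n).reshape(-1, 1)   # prior mean at n locations
Sig = 0.5 * np.eye(n)                           # prior covariance (assumed)
F = np.zeros((1, n))
F[0, 2] = 1.0                                   # sample the middle location
R = np.array([[0.01]])                          # observation-noise variance (assumed)
print(EIBV_1D(0.0, mu, Sig, F, R))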
Code example #29
def cal_c():
    from scipy.stats import mvn
    import numpy as np
    low = np.array([0, 0])
    upp = np.array([1, 1])
    mu = np.array([.5, .5])
    sigx = 1
    S = np.array([[sigx, 0], [0, sigx]])
    scale = 1
    p, i = mvn.mvnun(low, upp, mu, S)
    # print(p, i)
    return p*scale
Code example #30
def _mvn_un(rho, lower, upper):
    """
    Perform integral of bivariate normal gauss with correlation

    Integral is performed using scipy's mvn library.
    
    :returns float: integral value
    """
    mu = np.array([0., 0.])
    S = np.array([[1., rho], [rho, 1.0]])
    p, i = mvn.mvnun(lower, upper, mu, S)
    return p
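A quick usage sketch: sweep the correlation and watch the probability mass of the unit box change.

import numpy as np
from scipy.stats import mvn

lower = np.array([0.0, 0.0])
upper = np.array([1.0, 1.0])
for rho in (0.0, 0.5, 0.9):
    print(rho, _mvn_un(rho, lower, upper))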
Code example #31
def ExpectedVarianceUsr(threshold, mu, Sig, F, R):
    '''
    :param threshold:
    :param mu:
    :param Sig:
    :param F: sampling matrix
    :param R: noise matrix
    :return:
    '''
    Sigxi = np.dot(Sig, np.dot(F.T, np.linalg.solve(np.dot(F, np.dot(Sig, F.T)) + R, np.dot(F, Sig))))
    V = Sig - Sigxi
    sa2 = np.diag(V).reshape(-1, 1) # the corresponding variance term for each location
    IntA = 0.0
    for i in range(len(mu)):
        sn2 = sa2[i]
        sn = np.sqrt(sn2) # the corresponding standard deviation term
        m = mu[i]
        # mur = (threshold - m) / sn
        # Bernoulli variance p * (1 - p) of the excursion indicator
        p = mvn.mvnun(-np.inf, threshold, m, sn2)[0]
        IntA = IntA + p - p ** 2

    return IntA
Code example #32
File: Success.py Project: alvincai/SCA-Algorithms
    def verify(self):
        """ Verify our results against Yunsi Fei's results where she publishes her signal to noise ration and success rate curves
        """
        x = []
        yDPA = []
        yCPA = []

        # Amended Formula which works
        for i in range(1,200,20):
            cov, mean = self.conf.YCovMeanDPA(0.0016, 0.0046, i, correctKey=60)
            yDPA.append(mvn.mvnun(DESSuccess.lower, DESSuccess.upper, mean.reshape(63), cov)[0])
            x.append(i)
            cov, mean = self.conf.YCovMeanCPA2(0.0016, 0.0048, i, correctKey=60)
            yCPA.append(mvn.mvnun(self.lower, self.upper, mean.reshape(63), cov)[0])

        p1 = plot(xest,ySR1est,'--')
        p2 = plot(x,yCPA)
        legend(loc='lower right')
        ylabel('Success Rate')
        xlabel('Number of Measurements')
        show()
Code example #33
File: fastslam.py Project: hayashi-lab-soma/soma_pkg
def delete_features(features, pose, observations, noise, min_visibility,
                    max_visibility, threshold):
    new_features = []
    for f in features:
        mu = f[0].transpose()[0]
        xf, yf = mu
        d = sqrt((xf - pose[0])**2 + (yf - pose[1])**2)

        if d > min_visibility and d < max_visibility:
            highest_likelihood = 0
            for o in observations:
                phi = atan2(yf - pose[1], xf - pose[0]) - pose[2]

                d_noise, phi_noise = noise
                d_sigma = d_noise[0] * d + d_noise[1] * abs(phi) + d_noise[2]
                phi_sigma = phi_noise[0]*d + \
                    phi_noise[1]*abs(phi) + phi_noise[2]
                sigma = [[d_sigma, 0], [0, phi_sigma]]

                if abs(o[1] - phi) > abs(o[1] - phi + 2 * pi):
                    phi -= 2 * pi
                elif abs(o[1] - phi) > abs(o[1] - phi - 2 * pi):
                    phi += 2 * pi

                d_interval, phi_interval = d_sigma / 10, phi_sigma / 10

                if d_sigma == 0 or phi_sigma == 0:
                    if o[0] == d and o[1] == phi:
                        feature_likelihood = 1
                    else:
                        feature_likelihood = 0

                else:
                    feature_likelihood = mvnun(
                        np.array([
                            o[0] - d_interval / 2.0, o[1] - phi_interval / 2.0
                        ]),
                        np.array([
                            o[0] + d_interval / 2.0, o[1] + phi_interval / 2.0
                        ]), np.array([d, phi]), np.array(sigma))[0]

                assert feature_likelihood <= 1, feature_likelihood

                if feature_likelihood > highest_likelihood:
                    highest_likelihood = feature_likelihood

            if highest_likelihood > threshold:
                new_features.append(f)

        else:
            new_features.append(f)

    return new_features
Code example #34
File: fastslam.py Project: hayashi-lab-soma/soma_pkg
def correspondence(features, pose, observation, min_visibility, max_visibility,
                   noise, threshold):
    if len(features) == 0:
        return [], []

    corresponding_features = len(features) * [0]
    x, y, theta = pose.transpose()[0]

    for i, f in enumerate(features):
        mu = f[0].transpose()[0]
        xf, yf = mu
        d = sqrt((xf - x)**2 + (yf - y)**2)
        phi = atan2(yf - y, xf - x) - theta

        if d > min_visibility and d < max_visibility:
            d_noise, phi_noise = noise
            d_sigma = d_noise[0] * d + d_noise[1] * abs(phi) + d_noise[2]
            phi_sigma = phi_noise[0] * d + phi_noise[1] * abs(
                phi) + phi_noise[2]
            sigma = [[d_sigma, 0], [0, phi_sigma]]

            if abs(observation[1] - phi) > abs(observation[1] - phi + 2 * pi):
                phi -= 2 * pi
            elif abs(observation[1] - phi) > abs(observation[1] - phi -
                                                 2 * pi):
                phi += 2 * pi

            d_interval, phi_interval = d_sigma / 10, phi_sigma / 10
            feature_likelihood = mvnun(
                np.array([
                    observation[0] - d_interval / 2.0,
                    observation[1] - phi_interval / 2.0
                ]),
                np.array([
                    observation[0] + d_interval / 2.0,
                    observation[1] + phi_interval / 2.0
                ]), np.array([d, phi]), np.array(sigma))[0]

            assert feature_likelihood <= 1, "Probability greater than 1 !"

            if feature_likelihood > threshold:
                corresponding_features[i] = feature_likelihood

    corresponding_likelihoods = list(np.sort(np.array(corresponding_features)))
    corresponding_features = list(np.argsort(np.array(corresponding_features)))
    corresponding_likelihoods.reverse()
    corresponding_features.reverse()
    for i, e in enumerate(corresponding_likelihoods):
        if e == 0:
            corresponding_features[i] = None

    return corresponding_features, corresponding_likelihoods
Code example #35
def phi_func(rho, K):
    '''Here we define the phi as a function of rho and K'''

    # construct an array of covariance matrices for each rho
    COV = np.array([[[1, r], [r, 1]] for r in rho])

    # scipy doesn't offer a multivariate survival function (i.e. a complementary
    # cdf), so we build it as a box probability with a large finite upper bound
    mean = np.zeros(2)  # standard bivariate normal margins
    threshold = np.array([K, K])
    upper = np.array([100, 100])
    nom_phi = np.array(
        [mvn.mvnun(threshold, upper, mean, cov)[0] for cov in COV])

    return nom_phi / (1 - norm.cdf(K))
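A usage sketch (the zero mean above is our explicit assumption; the original snippet left mean undefined): the ratio P(X > K, Y > K) / P(X > K) grows with correlation and should approach 1 as rho approaches 1.

import numpy as np
from scipy.stats import norm, mvn

print(phi_func(np.linspace(0.0, 0.9, 4), K=1.0))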
Code example #36
def funcion_margina1_loglike2(x, y, qq1, qq2, rho, maxpts1, abseps1):
    auxiliar=qq1*qq2*rho;
    Sigma1=np.array([[1,auxiliar],[auxiliar,1]]);
    #test = -1*np.inf;
    test = -30;
    low=np.array([test,test]);
    uppaux=np.array([qq1*x,qq2*y]);
    #phi2=mvstdnormcdf(low,uppaux,Sigma1,maxpts1,abseps1,abseps1);
    mu=np.array([0,0]);
    phi2,inform = mvn.mvnun(low,uppaux,mu,Sigma1);
    if phi2<=abseps1:
       phi2=abseps1;
    return np.log(phi2);
Code example #37
def calculo_margina1_v4(x, y, qq1, qq2, rho, factor, maxpts1, abseps1):
    Sigma1=np.array([[1,rho],[rho,1]]);
    #test = np.inf;
    test=30;
    x0=-1*x;
    y0=-1*y;
    lowaux=np.array([-1*test,-1*test]);
    uppaux=np.array([x0,y0]);
    mu=np.array([0,0]);
    phi2,inform = mvn.mvnun(lowaux,uppaux,mu,Sigma1);
    #phi2=mvstdnormcdf(lowaux,uppaux,Sigma1,maxpts1,abseps1,abseps1);
    if phi2<=abseps1:
       phi2=abseps1;
    v1=x-(1/phi2)*((stats.norm._pdf(x0)*(stats.norm._cdf((y0+rho*x)*factor)))+rho*(stats.norm._pdf(y0)*(stats.norm._cdf((x0+rho*y)*factor))));
    v2=y-(1/phi2)*((stats.norm._pdf(y0)*(stats.norm._cdf((x0+rho*y)*factor)))+rho*(stats.norm._pdf(x0)*(stats.norm._cdf((y0+rho*x)*factor)))); 
    return v1, v2;
Code example #38
def marginal_score2(p, x, x1, x2, rho1, qqq1, qqq2, ggg1, ggg2, maxpts1, abseps1):
    summa_rho=np.zeros((1, 1));
    summa1=np.zeros((1, p));
    summa2=np.zeros((1, p));
    #test = -1*np.inf;
    test = -30;
    low=np.array([test,test]);
    mu=np.zeros((2,1));
    upp=np.zeros((2,1));
    auxiliar=qqq1*qqq2*rho1;
    Sigma1=np.array([[1,auxiliar],[auxiliar,1]]);
    upp=np.array([qqq1*x1,qqq2*x2]);
    phi2,inform = mvn.mvnun(low,upp,mu,Sigma1);
    #phi2=mvstdnormcdf(low,upp,Sigma1,maxpts1,abseps1,abseps1);
    if phi2<=abseps1:
       phi2=abseps1;       
    summa1=(((qqq1*ggg1)/phi2)*(x));
    summa2=(((qqq2*ggg2)/phi2)*(x));
    summa_rho=((qqq1*(qqq2*normpdf(upp, mu, Sigma1, 2)))/phi2);
    return summa1, summa2, summa_rho;
Code example #39
File: skellam.py Project: jpceia/maxlike
def gauss_bivar(x, y, rho):
    return mvnun(-999 * np.ones((2)), (x, y), (0, 0), np.array([[1, rho], [rho, 1]]))[0]
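A quick check (a sketch): -999 acts as a finite stand-in for -inf, so at rho = 0 the bivariate CDF factorizes into two 1-D normal CDFs.

import numpy as np
from scipy.stats import norm
from scipy.stats.mvn import mvnun  # the import the module presumably uses

assert np.isclose(gauss_bivar(0.3, -0.1, 0.0),
                  norm.cdf(0.3) * norm.cdf(-0.1), atol=1e-6)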
Code example #40
    if(np.any(N>0)):
        i = np.argmin(N[N>0])
        p_emp_IS = M[N>0][i]
        erreur_emp_IS = N[N>0][i]
        var_emp_IS = P[N>0][i]
    
    C.append(p_emp_IS)
    D.append(erreur_emp_IS)
    F.append(var_emp_IS)

low = epsilon * np.ones(npoint)
upp = 100 * np.ones(npoint)
P = []
for distance in np.linspace(0,8,100):
    mean = distance * np.ones(npoint)
    p,i = mvn.mvnun(low,upp,mean,cov)
    P.append(1-p)

latexify()
plt.figure()
plt.grid(True)
plt.semilogy(np.linspace(0, 8, 20), A, 'rx', label = 'MC')
plt.semilogy(np.linspace(0, 8, 20), C, 'b.', label ='IS')
plt.semilogy(np.linspace(0, 8, 100), P, 'k', label ='num')
plt.xlabel("Separation distance")
plt.ylabel("Probability")
plt.legend()
plt.savefig('Outputs/Script_8_ISmc_1.pdf', bbox_inches='tight')

plt.figure()
plt.grid(True)
Code example #41
def mutual_proximity_gauss(D: np.ndarray, metric: str = "distance", test_set_ind: np.ndarray = None, verbose: int = 0):
    """Transform a distance matrix with Mutual Proximity (normal distribution).
    
    Applies Mutual Proximity (MP) [1]_ on a distance/similarity matrix. The Gauss
    variant assumes dependent normal distributions (VERY SLOW).
    The resulting secondary distance/similarity matrix should show lower hubness.
    
    Parameters
    ----------
    D : ndarray
        - ndarray: The ``n x n`` symmetric distance or similarity matrix.
    
    metric : {'distance', 'similarity'}, optional (default: 'distance')
        Define whether matrix `D` is a distance or similarity matrix.
        
    test_set_ind : ndarray, optional (default: None)
        Define data points to be held out as part of a test set. Can be:
        
        - None : Rescale all distances
        - ndarray : Hold out points indexed in this array as test set. 
        
    verbose : int, optional (default: 0)
        Increasing level of output (progress report).
        
    Returns
    -------
    D_mp : ndarray
        Secondary distance MP gauss matrix.
    
    References
    ----------
    .. [1] Schnitzer, D., Flexer, A., Schedl, M., & Widmer, G. (2012). 
           Local and global scaling reduce hubs in space. The Journal of Machine 
           Learning Research, 13(1), 2871–2902.
    """
    # Initialization
    n = D.shape[0]
    log = Logging.ConsoleLogging()

    # Checking input
    IO._check_distance_matrix_shape(D)
    IO._check_valid_metric_parameter(metric)
    if metric == "similarity":
        self_value = 1
    else:  # metric == 'distance':
        self_value = 0
    if issparse(D):
        log.error("Sparse matrices not supported by MP Gauss.")
        raise TypeError("Sparse matrices not supported by MP Gauss.")
    if test_set_ind is None:
        train_set_ind = slice(0, n)
    else:
        train_set_ind = np.setdiff1d(np.arange(n), test_set_ind)

    # Start MP
    D = D.copy()

    np.fill_diagonal(D, self_value)
    # np.fill_diagonal(D, np.nan)

    mu = np.mean(D[train_set_ind], 0)
    sd = np.std(D[train_set_ind], 0, ddof=0)
    # ===========================================================================
    # mu = np.nanmean(D[train_set_ind], 0)
    # sd = np.nanstd(D[train_set_ind], 0, ddof=0)
    # ===========================================================================

    # Code for the BadMatrixSigma error [derived from matlab]
    # ===========================================================================
    # eps = np.spacing(1)
    # epsmat = np.array([[1e5 * eps, 0], [0, 1e5 * eps]])
    # ===========================================================================

    D_mp = np.zeros_like(D)

    # MP Gauss
    for i in range(n):
        if verbose and ((i + 1) % 1000 == 0 or i + 1 == n):
            log.message("MP_gauss: {} of {}.".format(i + 1, n))
        for j in range(i + 1, n):
            # ===================================================================
            # mask = np.isnan(D[[i, j], :])
            # D_mask = np.ma.array(D[[i, j], :], mask=mask)
            # c = np.ma.cov(D_mask, ddof=0)
            # ===================================================================
            c = np.cov(D[[i, j], :], ddof=0)
            x = np.array([D[i, j], D[j, i]])
            m = np.array([mu[i], mu[j]])

            low = np.tile(np.finfo(np.float32).min, 2)
            p12 = mvn.mvnun(low, x, m, c)[0]  # [0]...p, [1]...inform
            if np.isnan(p12):
                # ===============================================================
                # power = 7
                # while np.isnan(p12):
                #     c += epsmat * (10**power)
                #     p12 = mvn.mvnun(low, x, m, c)[0]
                #     power += 1
                # log.warning("p12 is NaN: i={}, j={}. Increased cov matrix by "
                #             "O({}).".format(i, j, epsmat[0, 0]*(10**power)))
                # ===============================================================

                p12 = 0.0
                log.warning("p12 is NaN: i={}, j={}. Set to zero.".format(i, j))

            if metric == "similarity":
                D_mp[i, j] = p12
            else:  # distance
                p1 = norm.cdf(D[i, j], mu[i], sd[i])
                p2 = norm.cdf(D[i, j], mu[j], sd[j])
                D_mp[i, j] = p1 + p2 - p12
    D_mp += D_mp.T
    np.fill_diagonal(D_mp, self_value)
    return D_mp
Code example #42
    pmf_dscrlognorm =  np.insert(np.diff(cdf_dscrlognorm), 0, cdf_dscrlognorm[0])
    h_line_true = plt.plot(yy, pmf_dscrlognorm, '--', c=np.ones(3)*1.0*i/N_interation)
# plt.legend()



# plt.plot(importance_ratio)
plt.plot(xs_rs, xs_weight, '.')



# test mvn cdf efficency:
from scipy.stats import mvn, multivariate_normal
for M in 2**np.arange(9):
    tic=time.time()
    temp = np.log2(mvn.mvnun(np.ones(M)*0,np.ones(M)*10,np.zeros(M),np.eye(M)))
    toc=time.time()
    h_ind = plt.loglog(M, toc-tic, 'ok')
for M in 2**np.arange(9):
    tic=time.time()
    temp = np.log2(mvn.mvnun(np.ones(M)*0,np.ones(M)*10,np.zeros(M),np.eye(M)*0.8+0.2))
    toc=time.time()
    h_cor = plt.loglog(M, toc-tic, '+r')
for M in 2**np.arange(9):
    N = 10000
    tic = time.time()
    dist = multivariate_normal(mean=np.zeros(M), cov=np.eye(M)*0.8+0.2)
    sps =  np.random.rand(N, M)*10
    logpdf_unnmlz = dist.logpdf(x=sps)
    temp = np.mean(dist.pdf(x=sps)*10**M)
    toc= time.time()
Code example #43
File: ml_modulo.py Project: lisrael1/quants
def ml_map_by_cdf(cov, number_of_bins, mod_size, number_of_modulos=7, plots=False, debug=False):
    '''

    :param cov:
    :param number_of_bins:
    :param mod_size:
    :param number_of_modulos: number of multiply for each side. at 2 we will get multiply of 5X5
    :param plots:
    :param debug:
    :return:
    '''
    import numpy as np
    import pandas as pd
    import itertools

    from scipy.stats import multivariate_normal

    pd.set_option("display.max_columns", 1000)  # don’t put … instead of multi columns
    pd.set_option('expand_frame_repr', False)  # for not wrapping columns if you have many
    pd.set_option("display.max_rows", 30)
    pd.set_option('display.max_colwidth', 1000)

    rounding=10
    df=bins_edges_with_duplication_of_modulo(rounding=rounding, mod_size=mod_size, number_of_bins=number_of_bins, number_of_modulos=number_of_modulos)
    '''now df has pixels with center and edges for each'''
    if plots:
        print('doing cdf')
    if 1:
        rv = multivariate_normal([0, 0], cov)
        if 1:
            if plots: print('starting product')
            cdf=pd.DataFrame(list(itertools.product(*[df[['x_high','x_low']].stack().unique().tolist()] * 2)), columns=list('xy')).round(rounding)  # we do all the unique stuff instead of taking bin_edges because we have floating point issue
            if plots: print('done product')
            '''now calculating cdf per pixel. each pixel has shared edges with its neighbors, so we will do the cdf offline and then merge it back'''
            if 0:  # cannot do this because at the right upper the cdf will be 1 and the pdf 0...
                cdf['cdf']=0  # for saving cdf calculation, that is slower... we will only calculate cdf on ones that their pdf is high
                cdf['pdf']=multivariate_normal.pdf(cdf[['x', 'y']].values, mean=[0, 0], cov=cov)
                cdf['pdf']=1
                cdf.loc[cdf.pdf>1e-10, 'cdf']=rv.cdf(cdf[cdf.pdf>1e-10][['x', 'y']].values)
                if plots:
                    print('doing cdf on {cdf:,} from total of {total:,} rows'.format(cdf=cdf[cdf.pdf>1e-10].shape[0], total=cdf.shape[0]))
                cdf=cdf.drop('pdf', axis=1)
            cdf['cdf']=rv.cdf(cdf[['x', 'y']].values)

            if plots: print('starting merging results')
            if 0:  # merging is slow, we better use join
                df['high_cdf'] = pd.merge(df[['x_high', 'y_high']].round(rounding), cdf.round(rounding), left_on=['x_high', 'y_high'], right_on=list('xy'), how='left').cdf.values
                df['low_cdf'] = pd.merge(df[['x_low', 'y_low']].round(rounding), cdf.round(rounding), left_on=['x_low', 'y_low'], right_on=list('xy'), how='left').cdf.values
                df['left_cdf'] = pd.merge(df[['x_low', 'y_high']].round(rounding), cdf.round(rounding), left_on=['x_low', 'y_high'], right_on=list('xy'), how='left').cdf.values
                df['down_cdf'] = pd.merge(df[['x_high', 'y_low']].round(rounding), cdf.round(rounding), left_on=['x_high', 'y_low'], right_on=list('xy'), how='left').cdf.values
            else:
                def merging(left, right):
                    '''
                        pd.merge is slow, so use join instead,
                        and don't sort_index: it costs more time than it saves the join.
                        right should be bigger than left, because left holds only the upper/lower edges etc.
                    :param left:
                    :param right:
                    :return:
                    '''
                    col = list('xy')
                    left.columns = col
                    left.set_index(col, inplace=True)
                    if 0:
                        m=left.index.to_frame().reset_index(drop=True).merge(right.index.to_frame().reset_index(drop=True), on=list('xy'), indicator=True, how='outer', suffixes=['','_'])
                        if not m[m._merge=='left'].sort_values(by=['x','y']).empty:
                            print(m[m._merge=='left'].sort_values(by=['x','y']))
                            print(m._merge.value_counts())
                            # print(left.join(right, how='outer').loc[left.join(right, how='outer').isna().cdf.values])
                            # print(pd.concat([left.index.to_frame(),right.index.to_frame()]).drop_duplicates(keep=False).reset_index(drop=True).sort_values(by=['x','y']))
                    merged=left.join(right, how='left').cdf
                    if merged.isna().sum():
                        print('we have nan at the merged step')
                    return merged.values

                cdf=cdf.set_index(list('xy'))
                df['high_cdf'] = merging(df[['x_high', 'y_high']], cdf)
                df['low_cdf'] =  merging(df[['x_low',  'y_low' ]], cdf)
                df['left_cdf'] = merging(df[['x_low',  'y_high']], cdf)
                df['down_cdf'] = merging(df[['x_high', 'y_low' ]], cdf)
            if df.applymap(np.isnan).sum().sum():
                print("WARNING - we have nan values after merging cdf values, and there shouldn't be any!")
                print('found {nans:,} nans from total of {total:,} cells'.format(nans=df.applymap(np.isnan).sum().sum(), total=df.size))
            if plots: print('done merging results')
        else:  # more time, because we calculate the same dot 4 times
            df['high_cdf']=rv.cdf(df[['x_high', 'y_high']].values)
            df['low_cdf']=rv.cdf(df[['x_low', 'y_low']].values)
            df['left_cdf']=rv.cdf(df[['x_low', 'y_high']].values)
            df['down_cdf']=rv.cdf(df[['x_high', 'y_low']].values)
        df['bin_cdf']=df.high_cdf-df.left_cdf-df.down_cdf+df.low_cdf
    else:  # at bins >10 it's slower because it's not vector operation
        from scipy.stats import mvn
        df['bin_cdf']=df.apply(lambda row:mvn.mvnun(row[['x_low','y_low']].values,row[['x_high','y_high']].values,[0,0],cov.tolist())[0], axis=1)
    if plots: print('done cdf')

    if plots: print('''finding best group at the main modulo''')
    probability_shifts = df.pivot_table(index=['x_modulo_shifts', 'y_modulo_shifts', 'modulo_group_number'], columns=['x_mod', 'y_mod'], values='bin_cdf').idxmax().unstack()
    try:
        probability_map=probability_shifts.applymap(lambda x:x[2])
        x_shift=probability_shifts.applymap(lambda x:x[0])
        y_shift=probability_shifts.applymap(lambda x:x[1])
    except:
        print(probability_shifts)
        print(probability_shifts.nunique())
        print('unknown error: for some reason the content holds a float instead of a dictionary. maybe it happens when we have only 1 sample? returning')
        return dict()
    pvt=df.pivot_table(index='modulo_group_number', columns=['x_mod', 'y_mod'], values='bin_cdf')
    probability_map_max = pvt.max().unstack()
    if pvt.count().unstack().std().std():
        print("WARNING - probability map modulo didn't work correctly")
        # df.modulo_group_number.value_counts().sort_values()
    if pvt.applymap(np.isnan).sum().sum():
        print('WARNING - you have nan cells after the pivot, it means that each group has its own x_mod value, which cannot be, unless you have float precision issue')
    if debug:
        print('group_occurrence')
        group_occurrence = 100 * probability_map.stack().value_counts() / probability_map.size
        group_occurrence = group_occurrence.to_frame('percentages').reset_index().rename(columns=dict(index='modulo_group_number'))
        group_occurrence = pd.merge(modulo_group, group_occurrence, on='modulo_group_number', how='right').sort_values('percentages', ascending=False)
        print(group_occurrence)

    if plots:
        print('starting plotting')
        import plotly as py
        import cufflinks
        if debug:
            if 0:
                original_heatmap=df[['x_center','y_center','bin_cdf']][(df.x_center==df.x_mod)&(df.y_center==df.y_mod)].set_index(['x_center','y_center']).unstack()
            else:
                original_heatmap=df[['x_center','y_center','bin_cdf']].set_index(['x_center','y_center']).unstack()
            original_heatmap.columns=original_heatmap.columns.get_level_values(1)
            fig = original_heatmap.figure(kind='heatmap', colorscale='Reds')
            # fig = original_heatmap.figure(kind='surface', colorscale='Reds')
            py.offline.plot(fig, filename='original_heatmap.html')
        if 1:
            fig=probability_map.figure(kind='heatmap', colorscale='Reds')
            py.offline.plot(fig, filename='probability_map.html')
            fig = probability_map_max.figure(kind='heatmap', colorscale='Reds')
            py.offline.plot(fig, filename='probability_map_max.html')
        else:
            probability_map=df.pivot_table(index='modulo_group_number', columns=['x_mod', 'y_mod'], values='bin_cdf').idxmax().sort_values().to_frame('modulo_group_number').astype(str).reset_index()
            fig=probability_map.figure(kind='scatter', x='x_mod', y='y_mod', categories='modulo_group_number')
            py.offline.plot(fig)
    ml=x_shift.stack().to_frame('x_shift').join(y_shift.stack().to_frame('y_shift'))
    return ml
Code example #44
File: copula.py Project: jpceia/maxlike
def gauss_bivar(x, y, rho):
    return mvnun((-999, -999), (x, y), (0, 0), ((1, rho), (rho, 1)))[0]
Code example #45
File: Success.py Project: alvincai/SCA-Algorithms
 def Cdf(self,x):
     # cdf = integral of pdf from lower limit to upper limit (x)
     # Reference http://www.nhsilbert.net/source/2014/04/multivariate-normal-cdf-values-in-python/
     return mvn.mvnun(self.lowerLimit63, x, self.meanNorm63, self.covNorm63)
Code example #46
File: Success.py Project: alvincai/SCA-Algorithms
 def TheoreticalCPA(self, numTraces):
     """
     Returns the DES, CPA Success Rate (calculated using Yunsi Fei's Confusion Analysis formula) for the indicated number of traces
     """
     cov, mean = self.conf.YCovMeanCPA2(self.signal, self.noise, numTraces, correctKey=self.correctSubKey)
     return mvn.mvnun(self.lower, self.upper, mean.reshape(63), cov)[0]
Code example #47
def tripleRectIntegrate2(bounds, mu, var):
    lower, upper = tuple(map(tuple, zip(*bounds)))
    value, inform = mvn.mvnun(lower, upper, mu, np.diag(var))
    return value
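A sanity check (a minimal sketch): with the diagonal covariance built above, the box probability factorizes into independent 1-D interval probabilities.

import numpy as np
from scipy.stats import norm, mvn

bounds = [(-1.0, 1.0), (0.0, 2.0), (-0.5, 0.5)]
mu = np.zeros(3)
var = np.array([1.0, 2.0, 0.5])
lo, hi = map(np.array, zip(*bounds))
sd = np.sqrt(var)
p_ref = np.prod(norm.cdf(hi, mu, sd) - norm.cdf(lo, mu, sd))
assert np.isclose(tripleRectIntegrate2(bounds, mu, var), p_ref, atol=1e-6)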
Code example #48
        i = np.argmin(A[A>0])
        p_emp_IS = B[A>0][i]
        erreur_emp_IS = A[A>0][i]
        return (p_emp_IS, erreur_emp_IS, Si[A>0][i])
    
    return (0,0,0)


epsilon = 0.1 # collision distance
npoint = 20

for distance in np.linspace(4, 10, 4):
    for Nsim in [100, 1000, 100000]:
        text_file = open("OutFiles/Output_IS_linear_%s_%s.csv" % (distance,Nsim), "w")
        text_file.write("Distance entre avions : %s \n" % distance)
        text_file.write("Nombre de simulations : %s \n" % Nsim)
        text_file.write("Distance, Probability, Error, Relative error, mu \n \n")
        
        for i in range(23):
            prob, err, mu = IS(distance, Nsim, npoint)
            text_file.write("%s, %.3E, %.3E, %.2f\%%, %s, %.3f \n" % (distance, decimal.Decimal(prob), decimal.Decimal(err),100*err/prob, Nsim, mu))
        
        low = epsilon * np.ones(npoint)
        upp = 100 * np.ones(npoint)
        
        mean = moyenne(npoint, distance)
        cov = covariance(npoint)
        p,i = mvn.mvnun(low,upp,mean,cov) # p = probability that all distances are > 0.1
        real = 1-p # probability that at least one distance is < 0.1
        text_file.write("Real value : %s" % real)
        text_file.close()