Example #1
def objective(pars,Z,ycovar):
    """The objective function"""
    bcost= pars[0]
    t= pars[1]
    Pb= pars[2]
    Xb= pars[3]
    Yb= pars[4]
    Zb= sc.array([Xb,Yb])
    Vb1= sc.exp(pars[5])
    Vb2= sc.exp(pars[6])
    corr= pars[7]
    V= sc.array([[Vb1,sc.sqrt(Vb1*Vb2)*corr],[sc.sqrt(Vb1*Vb2)*corr,Vb2]])
    v= sc.array([-sc.sin(t),sc.cos(t)])
    if Pb < 0. or Pb > 1.:
        return -sc.finfo(sc.dtype(sc.float64)).max
    if corr < -1. or corr > 1.:
        return -sc.finfo(sc.dtype(sc.float64)).max
    delta= sc.dot(v,Z.T)-bcost
    sigma2= sc.dot(v,sc.dot(ycovar,v))

    ndata= Z.shape[0]
    detVycovar= sc.zeros(ndata)
    deltaOUT= sc.zeros(ndata)
    for ii in range(ndata):
        detVycovar[ii]= m.sqrt(linalg.det(V+ycovar[:,ii,:]))
        deltaOUT[ii]= sc.dot(Z[ii,:]-Zb,sc.dot(linalg.inv(V+ycovar[:,ii,:]),Z[ii,:]-Zb))
    return sc.sum(sc.log((1.-Pb)/sc.sqrt(2.*m.pi*sigma2)*
                         sc.exp(-0.5*delta**2./sigma2)
                         +Pb/2./m.pi/detVycovar
                         *sc.exp(-0.5*deltaOUT)))
Example #2
 def __init__(self, renderer=True, realtime=True, ip="127.0.0.1", port="21560"):
     # initialize base class
     GraphicalEnvironment.__init__(self)
     self.actLen=12
     self.mySensors=sensors.Sensors(["EdgesReal"])
     self.dists=array([20.0, sqrt(2.0)*20, sqrt(3.0)*20])
     self.gravVect=array([0.0,-100.0,0.0])
     self.centerOfGrav=zeros((1,3),float)
     self.pos=ones((8,3),float)
     self.vel=zeros((8,3),float)
     self.SpringM = ones((8,8),float)
     self.d=60.0
     self.dt=0.02
     self.startHight=10.0
     self.dumping=0.4
     self.fraktMin=0.7
     self.fraktMax=1.3
     self.minAkt=self.dists[0]*self.fraktMin
     self.maxAkt=self.dists[0]*self.fraktMax
     self.reset()
     self.count=0
     self.setEdges()
     self.act(array([20.0]*12))
     self.euler()
     self.realtime=realtime
     self.step=0
     if renderer:
         self.setRenderInterface(FlexCubeRenderInterface(ip, port))
         self.getRenderInterface().updateData(self.pos, self.centerOfGrav)
Example #3
	def _initParams_fast(self):
		""" 
		initialize the gp parameters
			1) project Y on the known factor X0 -> Y0
				average variance of Y0 is used to initialize the variance explained by X0
			2) considers the residual Y1 = Y-Y0 (this equivals to regress out X0)
			3) perform PCA on cov(Y1) and considers the first k PC for initializing X
			4) the variance of all other PCs is used to initialize the noise
			5) the variance explained by interaction is set to a small random number 
		"""
		Xd = LA.pinv(self.X0)
		Y0 = self.X0.dot(Xd.dot(self.Y))
		Y1 = self.Y-Y0
		YY = SP.cov(Y1)
		S,U = LA.eigh(YY)
		X = U[:,-self.k:]*SP.sqrt(S[-self.k:])
		a = SP.array([SP.sqrt(Y0.var(0).mean())])
		b = 1e-3*SP.randn(1)
		c = SP.array([SP.sqrt((YY-SP.dot(X,X.T)).diagonal().mean())])
		# gp hyper params
		params = limix.CGPHyperParams()
		if self.interaction:
			params['covar'] = SP.concatenate([a,X.reshape(self.N*self.k,order='F'),SP.ones(1),b])
		else:
			params['covar'] = SP.concatenate([a,X.reshape(self.N*self.k,order='F')])
		params['lik'] = c
		return params
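The five initialization steps in the docstring are easy to prototype outside the class; below is a minimal self-contained numpy sketch (shapes and data are illustrative, the limix objects are omitted):

import numpy as np

rng = np.random.default_rng(0)
N, P, k = 100, 20, 2                      # samples, traits, hidden factors (illustrative)
X0 = rng.standard_normal((N, 1))          # known factor
Y = rng.standard_normal((N, P))

Y0 = X0 @ np.linalg.pinv(X0) @ Y                        # 1) project Y on X0
a = np.sqrt(Y0.var(0).mean())                           #    variance explained by X0
Y1 = Y - Y0                                             # 2) residual (X0 regressed out)
S, U = np.linalg.eigh(np.cov(Y1))                       # 3) PCA on cov(Y1)
X = U[:, -k:] * np.sqrt(S[-k:])                         #    top-k PCs initialize X
c = np.sqrt((np.cov(Y1) - X @ X.T).diagonal().mean())   # 4) remaining variance -> noise
b = 1e-3 * rng.standard_normal(1)                       # 5) small random interaction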
Example #4
def V_Multi(m, n):
    x,y,z=(m[0],m[1],m[2])
    a,b,c=(n[0],n[1],n[2])
    
    p,q,r = (a-x,b-y,c-z)
    
    return 90-degrees(acos( (x*p+y*q+z*r)/ (sqrt(x**2+y**2+z**2)*sqrt(p**2+q**2+r**2))))
Example #5
def Rz_to_coshucosv(R,z,delta=1.):
    """
    NAME:

       Rz_to_coshucosv

    PURPOSE:

       calculate prolate confocal cosh(u) and cos(v) coordinates from R,z, and delta

    INPUT:

       R - radius

       z - height

       delta= focus

    OUTPUT:

       (cosh(u),cos(v))

    HISTORY:

       2012-11-27 - Written - Bovy (IAS)

    """
    d12= (z+delta)**2.+R**2.
    d22= (z-delta)**2.+R**2.
    coshu= 0.5/delta*(sc.sqrt(d12)+sc.sqrt(d22))
    cosv=  0.5/delta*(sc.sqrt(d12)-sc.sqrt(d22))
    return (coshu,cosv)
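A quick round-trip check of these coordinates (a sketch using numpy in place of the sc alias): with foci at z = ±delta, the defining relations are R = delta·sinh(u)·sin(v) and z = delta·cosh(u)·cos(v).

import numpy as np

R, z, delta = 1.0, 0.5, 1.0
d12 = (z + delta)**2 + R**2
d22 = (z - delta)**2 + R**2
coshu = 0.5 / delta * (np.sqrt(d12) + np.sqrt(d22))
cosv = 0.5 / delta * (np.sqrt(d12) - np.sqrt(d22))

u, v = np.arccosh(coshu), np.arccos(cosv)
assert np.isclose(delta * np.sinh(u) * np.sin(v), R)   # recovers R
assert np.isclose(delta * np.cosh(u) * np.cos(v), z)   # recovers z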
Example #6
def r_t_mat_anallo(an1, an2):
    """ Returns R12, T12, R21, T21 at an interface between thin films.

        R12 is the reflection matrix from Anallo 1 off Anallo 2

        The sign of elements in T12 and T21 is fixed to be positive,
        in the eyes of `numpy.sign`
    """
    if len(an1.k_z) != len(an2.k_z):
        raise ValueError, "Need the same number of plane waves in \
        Anallos %(an1)s and %(an2)s" % {'an1' : an1, 'an2' : an2}

    Z1 = an1.Z()
    Z2 = an2.Z()

    R12 = np.mat(np.diag((Z2 - Z1)/(Z2 + Z1)))
    # N.B. there is potentially a branch choice problem here, stemming
    # from the normalisation to unit flux.
    # We normalise each field amplitude by
    # $chi^{\pm 1/2} = sqrt(k_z/k)^{\pm 1} = sqrt(Z/Zc)^{\pm 1}$
    # The choice of branch in those square roots must be the same as the
    # choice in the related square roots that we are about to take:
    T12 = np.mat(np.diag(2.*sqrt(Z2)*sqrt(Z1)/(Z2+Z1)))
    R21 = -R12
    T21 = T12

    return R12, T12, R21, T21
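For scalar real impedances these matrices reduce to flux-normalized Fresnel coefficients, which conserve energy at a lossless interface. A small sanity check (a sketch, not part of the original module):

import numpy as np

Z1, Z2 = 1.0, 2.5                              # illustrative impedances
R12 = (Z2 - Z1) / (Z2 + Z1)
T12 = 2.0 * np.sqrt(Z2) * np.sqrt(Z1) / (Z2 + Z1)
assert np.isclose(R12**2 + T12**2, 1.0)        # unit-flux normalization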
Example #7
def get_stderr_fit(f,Xdata,popt,pcov):
    
   Y= f(Xdata, popt)
   listdY=[]
   for i in xrange(len(popt)):
       p=popt[i]
       dp= abs(p)/1e6+1e-20
       popt[i]+=dp
       Yi= f(Xdata, popt)
       dY= (Yi-Y)/dp
       listdY.append(dY)
       popt[i]-=dp
   listdY= scipy.array(listdY)
   #listdY is the matrix of numerical derivatives dY/dp; it has N rows and M columns
   #pcov is N x N
   
   left= scipy.dot(listdY.T,pcov)
   right=scipy.dot(left,listdY)
   
   sigma2y= right.diagonal()
   #sigma2y is the variance of the fitted curve as a function of X
   mean_sigma2y= scipy.mean(right.diagonal())
   
   M= Xdata.shape[0]
   N= len(popt)
   avg_stddev_data=scipy.sqrt(M*mean_sigma2y/N)
   sigmay= scipy.sqrt(sigma2y)
   return sigmay,avg_stddev_data
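The propagation rule used here is sigma_y^2 = diag(J · pcov · J^T), with J the matrix of numerical derivatives dY/dp. A compact standalone sketch of the same computation (model and covariance are illustrative):

import numpy as np

def f(x, p):
    return p[0] * np.exp(-p[1] * x)

x = np.linspace(0.0, 4.0, 30)
popt = np.array([2.0, 0.7])                  # pretend fit result
pcov = np.diag([0.01, 0.002])                # pretend parameter covariance

J = np.empty((len(popt), len(x)))            # one row per parameter, as listdY above
for i, p in enumerate(popt):
    dp = abs(p) / 1e6 + 1e-20
    pp = popt.copy(); pp[i] += dp
    J[i] = (f(x, pp) - f(x, popt)) / dp

sigma_y = np.sqrt(np.diag(J.T @ pcov @ J))   # 1-sigma uncertainty band of the fit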
Example #8
    def test_covariate_shift(self):
        n_sample = 100
        # Biased training
        var_bias = .5**2
        mean_bias = .7
        x_train = SP.random.randn(n_sample)*SP.sqrt(var_bias) + mean_bias
        y_train = self.complete_sample(x_train)

        # Unbiased test set
        var = .3**2
        mean = 0

        x_test = SP.random.randn(n_sample)*SP.sqrt(var) + mean
        x_complete = SP.hstack((x_train, x_test))

        kernel = utils.getQuadraticKernel(x_complete, d=1) +\
            10 * SP.dot(x_complete.reshape(-1, 1), x_complete.reshape(1, -1))
        kernel = utils.scale_K(kernel)
        kernel_train = kernel[SP.ix_(SP.arange(x_train.size),
                                     SP.arange(x_train.size))]
        kernel_test = kernel[SP.ix_(SP.arange(x_train.size, x_complete.size),
                             SP.arange(x_train.size))]

        mf = MF(n_estimators=100, kernel=kernel_train, min_depth=0,
                subsampling=False)
        mf.fit(x_train.reshape(-1, 1), y_train.reshape(-1, 1))
        response_gp = mf.predict(x_test.reshape(-1, 1), kernel_test, depth=0)
        self.assertTrue(((response_gp - self.polynom(x_test))**2).sum() < 2.4)
Example #9
    def specular_incidence(self, pol = 'TE'):
        """ Return a vector of plane wave amplitudes corresponding
            to specular incidence in the specified polarisation.

            i.e. all elements are 0 except the zeroth order.
        """
        # Element corresponding to 0th order, TE
        spec_TE = self.specular_order
        # Element corresponding to 0th order, TM
        spec_TM = self.specular_order + self.structure.num_pw_per_pol
        tot_num_pw = self.structure.num_pw_per_pol * 2

        inc_amp = np.mat(np.zeros(tot_num_pw, dtype='complex128')).T
        if   'TE' == pol:
            inc_amp[spec_TE] = 1
        elif 'TM' == pol:
            inc_amp[spec_TM] = 1
        elif 'R Circ' == pol:
            inc_amp[spec_TE] = 1/sqrt(2.)
            inc_amp[spec_TM] = +1j/sqrt(2.)
        elif 'L Circ' == pol:
            inc_amp[spec_TE] = 1/sqrt(2.)
            inc_amp[spec_TM] = -1j/sqrt(2.)
        else:
            raise NotImplementedError, \
            "Must select from the currently implemented polarisations; \
             TE, TM, R Circ, L Circ."

        return inc_amp
Example #10
def MakePulseDataRepLPC(pulse,spec,N,rep1,numtype = sp.complex128):
    """ This will make data by assuming the data is an autoregressive process.
        Inputs
            spec - The properly weighted spectrum.
            N - The size of the ar process used to model the filter.
            pulse - The pulse shape.
            rep1 - The number of repeats of the process.
        Outputs
            outdata - A numpy Array with the shape of the """

    lp = len(pulse)
    lenspec = len(spec)
    r1 = scfft.ifft(scfft.ifftshift(spec))
    rp1 = r1[:N]
    rp2 = r1[1:N+1]
    # Use Levinson recursion to find the coefs for the data
    xr1 = sp.linalg.solve_toeplitz(rp1, rp2)
    lpc = sp.r_[sp.ones(1), -xr1]
    # The Gain  term.
    G = sp.sqrt(sp.sum(sp.conjugate(r1[:N+1])*lpc))
    Gvec = sp.r_[G, sp.zeros(N)]
    Npnt = (N+1)*3+lp
    # Create the noise vector and normalize
    xin = sp.random.randn(rep1, Npnt)+1j*sp.random.randn(rep1, Npnt)
    xinsum = sp.tile(sp.sqrt(sp.mean(xin.real**2+xin.imag**2, axis=1))[:, sp.newaxis],(1, Npnt))
    xin = xin/xinsum
    outdata = sp.signal.lfilter(Gvec, lpc, xin, axis=1)
    outpulse = sp.tile(pulse[sp.newaxis], (rep1, 1))
    outdata = outpulse*outdata[:, 2*N:2*N+lp]
    return outdata
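The Levinson step above (scipy.linalg.solve_toeplitz) recovers AR coefficients from autocorrelations. A minimal sketch for an AR(1) process, whose autocorrelation is r[k] = a**k, so the solver should return the coefficient a followed by zeros:

import numpy as np
from scipy.linalg import solve_toeplitz

a, N = 0.8, 4
r = a ** np.arange(N + 1)          # AR(1) autocorrelation
xr = solve_toeplitz(r[:N], r[1:N + 1])
print(xr)                          # ~[0.8, 0, 0, 0]
lpc = np.r_[1.0, -xr]              # whitening filter, as in lpc above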
Example #11
    def survival_function(loss_ratio, **kwargs):
        """
            Static method that prepares the calculation parameters
            to be passed to stats.lognorm.sf

            :param loss_ratio: current loss ratio
            :type loss_ratio: float

            :param kwargs: convenience dictionary
            :type kwargs: :py:class:`dict` with the following
                keys:
                    **vf** - vulnerability function as provided by
                            :py:class:`openquake.shapes.VulnerabilityFunction`
                    **col** - matrix column number
        """
        vuln_function = kwargs.get('vf')
        position = kwargs.get('col')

        vf_loss_ratio = vuln_function.loss_ratios[position]

        stddev = vuln_function.covs[position] * vf_loss_ratio

        variance = stddev ** 2.0

        sigma = sqrt(log((variance / vf_loss_ratio ** 2.0) + 1.0))
        mu = exp(log(vf_loss_ratio ** 2.0 /
            sqrt(variance + vf_loss_ratio ** 2.0)))

        return stats.lognorm.sf(loss_ratio, sigma, scale=mu)
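The mu/sigma algebra above is standard lognormal moment matching; note that exp(log(...)) is a no-op, so the scale passed to stats.lognorm.sf is simply the median m**2/sqrt(var + m**2). A standalone sketch with illustrative numbers:

from math import log, sqrt
from scipy import stats

mean_lr, cov = 0.4, 0.3                            # illustrative mean and CoV
variance = (cov * mean_lr) ** 2
sigma = sqrt(log(variance / mean_lr**2 + 1.0))
scale = mean_lr**2 / sqrt(variance + mean_lr**2)   # exp(mu), i.e. the median
print(stats.lognorm.sf(0.5, sigma, scale=scale))   # P(loss ratio > 0.5)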
Example #12
def compute_continuous_prob_value(parameters, distribution, rvs):
    mean = float(parameters[0])
    stddev = float(parameters[1]) / 100 * mean
    A = float(parameters[2])
    B = float(parameters[3])
    result = float("-inf")

    if rvs is None:
        while result <= A or result > B:

            if distribution == "normal":
                rvs = stats.norm.rvs
                result = rvs(mean, stddev)

            elif distribution == "lognormal":
                variance = stddev ** 2.0
                mu = log(mean ** 2.0 / sqrt(variance + mean ** 2.0))
                sigma = sqrt(log((variance / mean ** 2.0) + 1.0))
                rvs = stats.lognorm.rvs
                result = rvs(sigma, scale=scipy.exp(mu))

            elif distribution == "gamma":
                betha = (stddev) ** 2 / mean
                alpha = mean / betha
                rvs = stats.gamma.rvs
                result = rvs(alpha, scale=betha)
    else:
        result = 1

    return result
Example #13
def test_periodogram_csd():
    """Test corner cases of  periodogram_csd"""

    arsig1, _, _ = utils.ar_generator(N=1024)
    arsig2, _, _ = utils.ar_generator(N=1024)

    tseries = np.vstack([arsig1, arsig2])
    
    Sk = np.fft.fft(tseries)

    f1, c1 = tsa.periodogram_csd(tseries)
    f2, c2 = tsa.periodogram_csd(tseries, Sk=Sk)
    npt.assert_equal(c1, c2)

    # Check that providing a complex signal does the right thing
    # (i.e. two-sided spectrum): 
    N = 1024 
    r, _, _ = utils.ar_generator(N=N)
    c, _, _ = utils.ar_generator(N=N)
    arsig1 = r + c * scipy.sqrt(-1)

    r, _, _ = utils.ar_generator(N=N)
    c, _, _ = utils.ar_generator(N=N)
    arsig2 = r + c * scipy.sqrt(-1)

    tseries = np.vstack([arsig1, arsig2])

    f, c = tsa.periodogram_csd(tseries)
    npt.assert_equal(f.shape[0], N) # Should be N, not the one-sided N/2 + 1
Example #14
def simulate_genotypes_w_ld(n, m, n_samples, m_ld_chunk_size=100, r2=0.9, validation = False):
	val_set_gen = []
	val_set_D = []
	for i in xrange(n_samples):
		snps = sp.zeros((m, n), dtype='float32')
		num_chunks = m / m_ld_chunk_size
		for chunk in xrange(num_chunks):
			X = sp.zeros((m_ld_chunk_size, n), dtype='float32')
			X[0] = stats.norm.rvs(size=n)
			for j in xrange(1, m_ld_chunk_size):
				X[j] = sp.sqrt(r2) * X[j - 1] + sp.sqrt(1 - r2) * stats.norm.rvs(size=n)
			start_i = chunk * m_ld_chunk_size
			stop_i = start_i + m_ld_chunk_size
			snps[start_i:stop_i] = X
		snps_means = sp.mean(snps, axis=1)
		snps_stds = sp.std(snps, axis=1)
		snps_means.shape = (m, 1)
		snps_stds.shape = (m, 1)
		snps = (snps - snps_means) / snps_stds
		val_set_gen.append(snps)
		if validation:
			val_set_D = 0
		else:
			val_set_D.append(get_sample_D(n=n, m=m, num_sim=100, r2=r2))
		if sp.isnan(val_set_gen).any():
			return simulate_genotypes_w_ld(n, m, n_samples, m_ld_chunk_size=100, r2=0.9)
	return val_set_gen, val_set_D
Example #15
 def _getScalesDiag(self,termx=0):
     """
     Internal function for parameter initialization
     Uses 2 term single trait model to get covar params for initialization
     
     Args:
         termx:      non-noise term terms that is used for initialization 
     """
     assert self.P>1, 'VarianceDecomposition:: diagonal init_method allowed only for multi trait models'
     assert self.noisPos is not None, 'VarianceDecomposition:: noise term has to be set'
     assert termx<self.n_randEffs-1, 'VarianceDecomposition:: termx>=n_randEffs-1'
     assert self.trait_covar_type[self.noisPos] not in ['lowrank','block','fixed'], 'VarianceDecomposition:: diagonal initialization not possible for such a parametrization'
     assert self.trait_covar_type[termx] not in ['lowrank','block','fixed'], 'VarianceDecomposition:: diagonal initialization not possible for such a parametrization'
     scales = []
     res = self._getH2singleTrait(self.vd.getTerm(termx).getK())
     scaleg = sp.sqrt(res['varg'].mean())
     scalen = sp.sqrt(res['varn'].mean())
     for term_i in range(self.n_randEffs):
         if term_i==termx:
             _scales = scaleg*self.diag[term_i]
         elif term_i==self.noisPos:
             _scales = scalen*self.diag[term_i]
         else:
             _scales = 0.*self.diag[term_i]
         if self.jitter[term_i]>0:
             _scales = sp.concatenate((_scales,sp.array([sp.sqrt(self.jitter[term_i])])))
         scales.append(_scales)
     return sp.concatenate(scales)
Example #16
    def _Efficiency(self):
        Gas = self.kwargs["entradaGas"]
        Liquido = self.kwargs["entradaLiquido"]
        rhoS = Gas.solido.rho
        muG = Gas.Gas.mu
        rhoL = Liquido.Liquido.rho

        rendimiento_fraccional = []
        if self.kwargs["modelo_rendimiento"] == 0:
            # Johnstone (1954) model
            l = sqrt(pi/8)*Gas.Gas.mu/0.4987445/sqrt(Gas.Gas.rho*Gas.P)
            for dp in Gas.solido.diametros:
                Kn = l/dp*2
                C = Cunningham(l, Kn)
                kp = C*rhoS*dp**2*self.Vg/9/Gas.Gas.mu/self.dd
                penetration = exp(-self.k*self.R*kp**0.5)
                rendimiento_fraccional.append(1-penetration)

        elif self.kwargs["modelo_rendimiento"] == 1:
            # Calvert (1972) model
            l = sqrt(pi/8)*muG/0.4987445/sqrt(Gas.Gas.rho*Gas.P)
            for dp in Gas.solido.diametros:
                Kn = l/dp*2
                C = Cunningham(l, Kn)
                kp = C*rhoS*dp**2*self.Vg/9/muG/self.dd
                b = (-0.7-kp*self.f+1.4*log((kp*self.f+0.7)/0.7)+0.49 /
                     (0.7+kp*self.f))
                penetration = exp(self.R*self.Vg*rhoL*self.dd/55/muG*b/kp)
                if penetration > 1:
                    penetration = 1
                elif penetration < 0:
                    penetration = 0
                rendimiento_fraccional.append(1-penetration)

        return rendimiento_fraccional
Example #17
def get_stderr_fit(f,Xdata,popt,pcov):
	Y=f(Xdata,popt)
	listdY=[]
	for i in xrange(len(popt)):
		p=popt[i]
		dp=abs(p)/1e6+1e-20
		popt[i]+=dp
		Yi=f(Xdata,popt)
		dY=(Yi-Y)/dp
		listdY.append(dY)
		popt[i]-=dp
	listdY=scipy.array(listdY)
	#listdY is an array with N rows and M columns, N=len(popt), M=len(xdata[0])
	#pcov is an array with N rows and N columns
	left=scipy.dot(listdY.T,pcov) 
	#left is an array of M rows and N columns
	right=scipy.dot(left,listdY)
	#right is an array of M rows and M columns
	sigma2y=right.diagonal()
	#sigma2y is the standard error of the fit as a function of X
	mean_sigma2y=scipy.mean(right.diagonal())
	M=Xdata.shape[1];print M
	N=len(popt);print N
	avg_stddev_data=scipy.sqrt(M*mean_sigma2y/N)
	#this is because if the experimental error is constant at sig_dat, then mean_sigma2y = N/M*sig_dat**2
	sigmay=scipy.sqrt(sigma2y)
	return sigmay,avg_stddev_data
Example #18
 def __init__(self, render=True, realtime=True, ip="127.0.0.1", port="21560"):
     # initialize base class
     self.render = render
     if self.render:
         self.updateDone = True
         self.updateLock = threading.Lock()
         self.server = UDPServer(ip, port)
     self.actLen = 12
     self.mySensors = sensors.Sensors(["EdgesReal"])
     self.dists = array([20.0, sqrt(2.0) * 20, sqrt(3.0) * 20])
     self.gravVect = array([0.0, -100.0, 0.0])
     self.centerOfGrav = zeros((1, 3), float)
     self.pos = ones((8, 3), float)
     self.vel = zeros((8, 3), float)
     self.SpringM = ones((8, 8), float)
     self.d = 60.0
     self.dt = 0.02
     self.startHight = 10.0
     self.dumping = 0.4
     self.fraktMin = 0.7
     self.fraktMax = 1.3
     self.minAkt = self.dists[0] * self.fraktMin
     self.maxAkt = self.dists[0] * self.fraktMax
     self.reset()
     self.count = 0
     self.setEdges()
     self.act(array([20.0] * 12))
     self.euler()
     self.realtime = realtime
     self.step = 0
Example #19
    def f(self,x,t):
        N = len(x)/2
        xdot = pl.array([])

        # modulus the x for periodicity.
        x[N:2*N]= x[N:2*N]%self.d
        # HERE ---->> 1Dify
        for i in range(N):
            temp = 0.0
            for j in range(N):
                if i == j:
                    continue
                #repulsive x interparticle force of j on i
                temp += self.qq*(x[N+i]-x[N+j])/(pl.sqrt((x[N+i]-x[N+j])**2)**3)

                # Add the forces from the two anchored charges
                # First one at x=0
                temp += self.qq*(x[N+i]-0.0)/(pl.sqrt((x[N+i]-0.0)**2)**3)
                # Second one at the other boundary i.e. self.d
                temp += self.qq*(x[N+i]-self.d)/(pl.sqrt((x[N+i]-self.d)**2)**3)

            # periodic force on particle i
            temp += self.As[i]*pl.sin(x[N+i])*pl.cos(t)
            temp -= self.beta*x[i]
            xdot = pl.append(xdot,temp)
        for i in range(N):
            xdot = pl.append(xdot,x[i])
        return xdot
Example #20
def relative_bin_deviation(h1, h2): # 79 us @array, 104 us @list \w 100 bins
    r"""
    Calculate the bin-wise deviation between two histograms.
    
    The relative bin deviation between two histograms :math:`H` and :math:`H'` of size
    :math:`m` is defined as:
    
    .. math::
    
        d_{rbd}(H, H') = \sum_{m=1}^M
            \frac{
                \sqrt{(H_m - H'_m)^2}
              }{
                \frac{1}{2}
                \left(
                    \sqrt{H_m^2} +
                    \sqrt{{H'}_m^2}
                \right)
              }
    
    *Attributes:*

    - semimetric (triangle equation satisfied?)
    
    *Attributes for normalized histograms:*

    - :math:`d(H, H')\in[0, \infty)`
    - :math:`d(H, H) = 0`
    - :math:`d(H, H') = d(H', H)`
    
    *Attributes for not-normalized histograms:*

    - :math:`d(H, H')\in[0, \infty)`
    - :math:`d(H, H) = 0`
    - :math:`d(H, H') = d(H', H)`
    
    *Attributes for not-equal histograms:*

    - not applicable 
    
    Parameters
    ----------
    h1 : sequence
        The first histogram.
    h2 : sequence
        The second histogram, same bins as ``h1``.
    
    Returns
    -------
    relative_bin_deviation : float
        Relative bin deviation between the two histograms.
    """
    h1, h2 = __prepare_histogram(h1, h2)
    numerator = scipy.sqrt(scipy.square(h1 - h2))
    denominator = (scipy.sqrt(scipy.square(h1)) + scipy.sqrt(scipy.square(h2))) / 2.
    old_err_state = scipy.seterr(invalid='ignore') # divide through zero only occurs when the bin is zero in both histograms, in which case the division is 0/0 and leads to (and should lead to) 0
    result = numerator / denominator
    scipy.seterr(**old_err_state)
    result[scipy.isnan(result)] = 0 # faster than scipy.nan_to_num, which checks for +inf and -inf also
    return scipy.sum(result)
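Since __prepare_histogram is private to the module, the distance is easy to reproduce directly; a numpy sketch on two small histograms:

import numpy as np

h1 = np.array([10.0, 5.0, 0.0, 3.0])
h2 = np.array([8.0, 5.0, 0.0, 6.0])
num = np.sqrt(np.square(h1 - h2))
den = (np.sqrt(np.square(h1)) + np.sqrt(np.square(h2))) / 2.0
with np.errstate(invalid='ignore'):    # 0/0 only where both bins are empty
    ratio = num / den
ratio[np.isnan(ratio)] = 0.0           # define 0/0 := 0, as in the function above
print(ratio.sum())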
Example #21
def xyz2longlat(x,y,z):
    """ converts cartesian x,y,z coordinates into spherical longitude and latitude """
    r = sc.sqrt(x*x + y*y + z*z)
    long = sc.arctan2(y,x)
    d = sc.sqrt(x*x + y*y)
    lat = sc.arcsin(z/r)
    return long*deg, lat*deg, r
Example #22
def generate_test_data_w_sum_stats(h2=0.5, n=100000, n_sample=100, m=50000, model='gaussian', 
                                         p=1.0, conseq_r2=0, m_ld_chunk_size=100):
    """
    Generate 
    """
    #Get LD sample matrix
    D_sample = genotypes.get_sample_D(200,conseq_r2=conseq_r2,m=m_ld_chunk_size)
    
    #Simulate beta_hats
    ret_dict = simulate_beta_hats(h2=h2, n=n, n_sample=n_sample, m=m, model=model, p=p, 
                                    conseq_r2=conseq_r2, m_ld_chunk_size=m_ld_chunk_size, D_sample=D_sample)
    
    #Simulate test genotypes
    test_snps = genotypes.simulate_genotypes_w_ld(n_sample=n_sample, m=m, conseq_r2=conseq_r2, 
                                                  m_ld_chunk_size=m_ld_chunk_size)
    ret_dict['test_snps'] = test_snps
    
    #Simulate test phenotypes
    phen_noise = stats.norm.rvs(0, sp.sqrt(1.0 - h2), size=n_sample) 
    phen_noise = sp.sqrt((1.0 - h2) / sp.var(phen_noise)) * phen_noise
    genetic_part = sp.dot(test_snps.T, ret_dict['betas'])
    genetic_part = sp.sqrt(h2 / sp.var(genetic_part)) * genetic_part
    test_phen = genetic_part + phen_noise
    ret_dict['test_phen'] = test_phen
    return ret_dict
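The two sp.sqrt(... / sp.var(...)) rescalings pin the realized variances, so the simulated phenotype has in-sample heritability very close to h2. A minimal numpy sketch of just that trick:

import numpy as np

rng = np.random.default_rng(1)
h2, n = 0.5, 1000
genetic = rng.standard_normal(n)     # stand-in for sp.dot(test_snps.T, betas)
noise = rng.standard_normal(n)
genetic *= np.sqrt(h2 / genetic.var())
noise *= np.sqrt((1.0 - h2) / noise.var())
phen = genetic + noise
print(genetic.var() / phen.var())    # ~0.5, up to the genetic-noise covariance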
Example #23
def drazin(A, tol):
    CB = A.copy()

    Bs = []
    Cs = []
    k = 1

    while not (sp.absolute(CB) < tol).all() and sp.absolute(la.det(CB)) < tol:
        U, s, Vh = la.svd(CB)
        S = sp.diag(s)
        S = S * (S > tol)
        r = sp.count_nonzero(S)
        B = sp.dot(U, sp.sqrt(S))
        C = sp.dot(sp.sqrt(S), Vh)
        B = B[:, 0:r]
        Bs.append(B)
        C = C[0:r, :]
        Cs.append(C)
        CB = sp.dot(C, B)
        k += 1

    D = sp.eye(A.shape[0])
    for B in Bs:
        D = sp.dot(D, B)
    if (sp.absolute(CB) < tol).all():
        D = sp.dot(D, CB)
    else:
        D = sp.dot(D, np.linalg.matrix_power(CB, -(k + 1)))
    for C in reversed(Cs):
        D = sp.dot(D, C)
    return D
Example #24
    def _genBgTerm_fromXX(self,vTot,vCommon,XX,a=None,c=None):
        """
        generate background term from SNPs

        Args:
            vTot: variance of Yc+Yi
            vCommon: variance of Yc
            XX: kinship matrix
            a: common scales, it can be set for debugging purposes
            c: independent scales, it can be set for debugging purposes
        """
        vSpecific = vTot-vCommon

        SP.random.seed(0)
        if c is None: c = SP.randn(self.P)
        XX += 1e-3 * SP.eye(XX.shape[0])
        L = LA.cholesky(XX,lower=True)

        # common effect
        R = self.genWeights(self.N,self.P)
        A = self.genTraitEffect()
        if a is not None: A[0,:] = a
        Yc = SP.dot(L,SP.dot(R,A))
        Yc*= SP.sqrt(vCommon)/SP.sqrt(Yc.var(0).mean())

        # specific effect
        R = SP.randn(self.N,self.P)
        Yi = SP.dot(L,SP.dot(R,SP.diag(c)))
        Yi*= SP.sqrt(vSpecific)/SP.sqrt(Yi.var(0).mean())

        return Yc, Yi
Example #25
def _sampling_matrix(hessian, cutoff=0, temperature=1, step_scale=1):    
    # basically need SVD of hessian - singular values and eigenvectors
    # hessian = u * diag(singVals) * vh
    u, sing_vals, vh = scipy.linalg.svd(0.5 * hessian)

    # scroll through the singular values and find the ones whose inverses will
    # be huge and set them to zero also, load up the array of singular values 
    # that we store
    # cutoff = (1.0/_.singVals[0])*1.0e03
    # double cutoff = _.singVals[0]*1.0e-02
    cutoff_sing_val = cutoff * max(sing_vals)

    D = 1.0/scipy.maximum(sing_vals, cutoff_sing_val)

    ## now fill in the sampling matrix ("square root" of the Hessian)
    ## note that sqrt(D[i]) is taken here whereas Kevin took sqrt(D[j])
    ## this is because vh is the transpose of his PT -JJW
    samp_mat = scipy.transpose(vh) * scipy.sqrt(D)

    # Divide the sampling matrix by an additional factor such
    # that the expected quadratic increase in cost will be about 1.
    cutoff_vals = scipy.compress(sing_vals < cutoff_sing_val, sing_vals)
    if len(cutoff_vals):
        scale = scipy.sqrt(len(sing_vals) - len(cutoff_vals)
                           + sum(cutoff_vals)/cutoff_sing_val)
    else:
        scale = scipy.sqrt(len(sing_vals))

    samp_mat /= scale
    samp_mat *= step_scale
    samp_mat *= scipy.sqrt(temperature)

    return samp_mat
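Before the scale, step_scale, and temperature factors, samp_mat is a matrix square root of the inverse half-Hessian. A quick check of that identity (illustrative 2x2 Hessian, no cutoff):

import numpy as np
import scipy.linalg

hess = np.array([[4.0, 1.0], [1.0, 3.0]])      # illustrative SPD Hessian
u, s, vh = scipy.linalg.svd(0.5 * hess)
samp = vh.T * np.sqrt(1.0 / s)
print(np.allclose(samp @ samp.T, np.linalg.inv(0.5 * hess)))   # True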
Example #26
def ZYFF(Te, EIJ):
    """Computes `ZY` and `FF`, used in other functions.
    
    If `EIJ` is a scalar, the output has the same shape as `Te`. If `EIJ` is an
    array, the output has shape `EIJ.shape` + `Te.shape`. This should keep the
    output broadcastable with `Te`.
    
    Parameters
    ----------
    Te : array of float
        Electron temperature. Shape is arbitrary.
    EIJ : scalar float or array of float
        Energy difference.
    """
    # Expand the dimensions of EIJ to produce the desired output shape:
    Te = scipy.asarray(Te, dtype=float)
    EIJ = scipy.asarray(EIJ, dtype=float)
    for n in xrange(Te.ndim):
        EIJ = scipy.expand_dims(EIJ, axis=-1)
    
    ZY = EIJ / (1e3 * Te)
    
    FF = scipy.zeros_like(ZY)
    mask = (ZY >= 1.5)
    FF[mask] = scipy.log((ZY[mask] + 1) / ZY[mask]) - (0.36 + 0.03 * scipy.sqrt(ZY[mask] + 0.01)) / (ZY[mask] + 1)**2
    mask = ~mask
    FF[mask] = scipy.log((ZY[mask] + 1) / ZY[mask]) - (0.36 + 0.03 / scipy.sqrt(ZY[mask] + 0.01)) / (ZY[mask] + 1)**2
    
    return ZY, FF
Example #27
 def test_gets_thermal_with_correlated(self):
     """Checks that the part of the freq_modes code that compensates the
     thermal for mode subtraction works."""
     self.data *= sp.sqrt(self.bw * 2)  # Makes thermal unity.
     # Need to add something correlated so the modes aren't just the
     # channels.
     correlated_overf = noise_power.generate_overf_noise(1, 
                                 -2, 0.5, self.dt, self.data.shape[0])
     correlated_overf += (rand.normal(size=(self.data.shape[0],))
                          * sp.sqrt((self.bw * 2) * 0.3))
     self.data += correlated_overf[:,None,None,None] / sp.sqrt(self.nf)
     Blocks = self.make_blocks()
     # Mask a channel out completely.
     for Data in Blocks:
         Data.data[:,:,:,3] = ma.masked
     model = 'freq_modes_over_f_4'  # Take out 20% of the thermal power.
     parameters = mn.measure_noise_parameters(Blocks,
                                             [model])
     right_ans = sp.ones(self.nf)
     right_ans[3] = T_infinity**2
     for p in parameters.itervalues():
         pars = p[model]
         thermal = pars['thermal']
         self.assertTrue(sp.allclose(thermal,
                                     right_ans, rtol=0.3))
         mean_thermal = sp.mean(thermal[right_ans==1])
         self.assertTrue(sp.allclose(mean_thermal, 1, rtol=0.05))
         self.assertTrue(sp.allclose(pars['over_f_mode_0']['thermal'],
                                     0.3, atol=0.1))
Example #28
def ndot_product(features1,
                 features2 = None):

    """
    generates kernel based on normalized dot product

    input :
    features1 : vectors representing the rows in the matrix
    features2 : vectors representing the columns in the matrix

    output :
    out : similarity matrix

    """

    features1.shape = features1.shape[0], -1
    features1 = features1/sp.sqrt((features1**2.).sum(1))[:, None]

    if features2 is None:
        features2 = features1
    else:
        features2.shape = features2.shape[0], -1
        features2 = features2/sp.sqrt((features2**2.).sum(1))[:, None]

    out = sp.dot(features1, features2.T)

    return out
Example #29
def heart(scale,ndim,time):
    
    
    percent = 1.05 + 0.5*np.random.rand()
    
    real_heart = int(time*percent)
    
    ratio = 0.5*np.random.rand()  #xy ratio
    x = scipy.linspace(-2,2,real_heart/2)
    y1 = scipy.sqrt(1-(abs(x)-1)**2)
    y2 = -3*scipy.sqrt(1-(abs(x[::-1])/2)**0.5)
    
    Y = np.concatenate([y1,y2])
    X  = ratio*np.concatenate([x,x[::-1]])
    
    shift = np.random.randint(0,real_heart)
    Y = np.roll(Y,shift)[:time]
    X = np.roll(X,shift)[:time]
    traj = np.array([X,Y,np.zeros_like(Y)]).T
    
    alpha = 2*3.14 * np.random.rand()
    
    if ndim == 2:
        traj = traj[::,:2]
                    
   
    
    noise = diffusive(scale/800.,ndim,time+1,epsilon=1e-7)
    return scale*random_rot(traj,alpha,ndim) + noise[:-1]-noise[1:]
Example #30
def pix2sky(header,x,y):
	hdr_info = parse_header(header)
	x0 = x-hdr_info[1][0]+1.	# Plus 1 python->image
	y0 = y-hdr_info[1][1]+1.
	x0 = x0.astype(scipy.float64)
	y0 = y0.astype(scipy.float64)
	x = hdr_info[2][0,0]*x0 + hdr_info[2][0,1]*y0
	y = hdr_info[2][1,0]*x0 + hdr_info[2][1,1]*y0
	if hdr_info[3]=="DEC":
		a = x.copy()
		x = y.copy()
		y = a.copy()
		ra0 = hdr_info[0][1]
		dec0 = hdr_info[0][0]/raddeg
	else:
		ra0 = hdr_info[0][0]
		dec0 = hdr_info[0][1]/raddeg
	if hdr_info[5]=="TAN":
		r_theta = scipy.sqrt(x*x+y*y)/raddeg
		theta = arctan(1./r_theta)
		phi = arctan2(x,-1.*y)
	elif hdr_info[5]=="SIN":
		r_theta = scipy.sqrt(x*x+y*y)/raddeg
		theta = arccos(r_theta)
		phi = arctan2(x,-1.*y)
	ra = ra0 + raddeg*arctan2(-1.*cos(theta)*sin(phi-pi),
				   sin(theta)*cos(dec0)-cos(theta)*sin(dec0)*cos(phi-pi))
	dec = raddeg*arcsin(sin(theta)*sin(dec0)+cos(theta)*cos(dec0)*cos(phi-pi))

	return ra,dec
Example #31
    def plotRatioWav(self, inputfilename, no_peak=False):
        '''
        Plot ratios as a function of their central wavelength.
        
        In blue and green, the peak-to-peak ratios are plotted. Blue for normal
        peak-to-peak ratios, green for ratios that involve a data point that is
        in the noise. Plotting peak-to-peak ratios can be turned off with the 
        no_peak keyword.
        
        In red and magenta, the integrated line strengths are plotted. Red for
        isolated lines that are not tagged as a blend, magenta for lines that 
        are tagged as a blend due to 1) too wide lines with respect to the PACS
        resolution, or 2) multiple sample transitions in the width of an 
        observed line. 
        
        @param inputfilename: the input filename for the grid, which will be 
                              attached to the final plot filename
        @type inputfilename: string
        
        @keyword no_peak: Hide the peak ratios. 
                          
                          (default: False)
        @type no_peak: bool
        
        '''

        if not self.chi2_inttot:
            print "No Sphinx models calculated. Aborting statistics plot. "
            return
        this_grid = self.sortStarGrid()
        plot_filenames = []
        inst = self.instrument
        for star in this_grid:
            this_id = star['LAST_%s_MODEL' % inst.instrument.upper()]
            lp = []
            waves = []
            ratios = []
            ratios_err = []

            #-- the ratios included in the statistics
            if not no_peak:
                this_wav_inc = self.getRatios(data_type='central_mwav',\
                                              this_id=this_id)
                this_ratio_inc = self.getRatios(this_id=this_id)
                if list(this_wav_inc):
                    waves.append(this_wav_inc)
                    ratios.append(this_ratio_inc)
                    ratios_err.append(None)
                    lp.append('ob')

                #-- ratios replaced by lower limits if data point is in the noise
                #   ie data point can only be smaller than or equal to used value
                this_wav_lower = self.getRatios(data_type='central_mwav',\
                                                this_id=this_id,\
                                                return_negative=1)
                this_ratio_lower = self.getRatios(return_negative=1,\
                                                  this_id=this_id)
                this_ratio_lower = [abs(r) for r in this_ratio_lower]
                if list(this_wav_lower):
                    waves.append(this_wav_lower)
                    ratios.append(this_ratio_lower)
                    ratios_err.append(None)
                    lp.append('dg')

            if inst.linefit is not None:
                #-- If integrated intensities are available for the instrument, get
                #   the integrated intensity ratios
                this_wav_int = self.getRatios(sel_type='int_ratios',\
                                              data_type='central_mwav',\
                                              this_id=this_id)
                this_ratio_int = self.getRatios(sel_type='int_ratios',\
                                                data_type='int_ratios',\
                                                this_id=this_id)
                this_ratio_int_err = self.getRatios(sel_type='int_ratios',\
                                                    data_type='int_ratios_err',\
                                                    this_id=this_id)
                if list(this_wav_int):
                    waves.append(this_wav_int)
                    ratios.append(this_ratio_int)
                    ratios_err.append(this_ratio_int_err)
                    lp.append('or')

                #-- Get the ratios that are lower limits due to line blends.
                #   Line blends detected due to fitted FWHM/PACS FWHM > 120%
                #   ie model int can only be larger than or equal to used value
                this_wav_lowerint = self.getRatios(sel_type='int_ratios',\
                                                   data_type='central_mwav',\
                                                   this_id=this_id,\
                                                   return_negative=1)
                this_ratio_lowerint = self.getRatios(sel_type='int_ratios',\
                                                     data_type='int_ratios',\
                                                     this_id=this_id,\
                                                     return_negative=1)
                this_ratio_lowerint_err = self.getRatios(sel_type='int_ratios',\
                                                    data_type='int_ratios_err',\
                                                    this_id=this_id,\
                                                    return_negative=1)
                this_ratio_lowerint = [abs(r) for r in this_ratio_lowerint]
                if list(this_wav_lowerint):
                    waves.append(this_wav_lowerint)
                    ratios.append(this_ratio_lowerint)
                    ratios_err.append(this_ratio_lowerint_err)
                    lp.append('dm')

            #- prepping input for the plot command
            xmin = min([min(x) for x in waves])
            xmax = max([max(x) for x in waves])
            waves.extend([[0.5 * xmin, 1.5 * xmax]] * 3)
            ratios.extend([[1,1],\
                           [1-inst.absflux_err,\
                            1-inst.absflux_err],\
                           [1+inst.absflux_err,\
                            1+inst.absflux_err]])
            ratios_err.extend([None, None, None])
            lp.extend(['-k', '--k', '--k'])
            plot_filename = os.path.join(getattr(cc.path,self.code.lower()),\
                                         self.path_code,'stars',\
                                         self.star_name,\
                                         '%s_results_'%inst.instrument+\
                                         'ratio_wav_%s'%str(this_id))
            labels = [('Mdot = %.2e Msolar/yr'%star['MDOT_GAS'],0.05,0.05),\
                      ('Teff = %.1f K'%star['T_STAR'],0.05,0.1),\
                      ('$\psi$ = %0.2e'%star['DUST_TO_GAS_CHANGE_ML_SP'],0.05,\
                       0.15),\
                      ('A$_{H_2O}$/A$_{H_2}$ = %0.2e'%star['F_H2O'],0.05,0.2),\
                      ('R$_(o,H_2O)$ = %i'%int(star['R_OUTER_H2O']),0.05,0.25)]
            if star.getMolecule('1H1H16O') \
                  and star.getMolecule('1H1H16O').set_keyword_change_abundance:
                labels.append(('$H_2O$ profile = %s'\
                               %os.path.split(star.getMolecule('1H1H16O')\
                                                   .change_fraction_filename\
                                                   .replace('_','\_'))[1],\
                               0.05,0.30))
            plot_title = '%s: $\chi^2_\mathrm{con}$ %.4f'\
                         %(str(this_id).replace('_','\_'),\
                           self.chi2_con[this_id])
            if self.chi2_inttot[this_id]:
                plot_title += ', $\chi^2_\mathrm{int}$ %.4f'\
                              %(self.chi2_inttot[this_id])
            plot_filenames.append(Plotting2.plotCols(\
                    filename=plot_filename,x=waves,y=ratios,yerr=ratios_err,\
                    yaxis=r'$F_{\nu,p,m}/F_{\nu,p,d}$',\
                    plot_title=plot_title,labels=labels,extension='pdf',\
                    xlogscale=0,ylogscale=1,line_types=lp,xmin=xmin*0.9,\
                    xmax=xmax*1.03,figsize=(10.*scipy.sqrt(2.), 10.),\
                    linewidth=2,fontsize_title=20,fontsize_label=16))
        inputf_short = os.path.splitext(os.path.split(inputfilename)[1])[0]
        new_filename = os.path.join(getattr(cc.path,self.code.lower()),\
                                    self.path_code,'stars',self.star_name,\
                                    '%s_results_'%inst.instrument+\
                                    'ratio_wav_%s.pdf'%inputf_short)
        DataIO.joinPdf(old=plot_filenames, new=new_filename)
        print '** Stat plots can be found at:'
        print new_filename
        print '***********************************'
Example #32
 def _evaluate(self, x, t=0.):
     return self._K * (sc.sqrt(x**2. + self._D2) -
                       self._D) + self._F * x**2.
Example #33
def mse(y_test, y):
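    # Note: despite the name, this returns the root-mean-square error (RMSE), not the MSE.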
    return sp.sqrt(sp.mean((y_test - y)**2))
Example #34
def parspar(n):
    i = n/ndim
    j = n%ndim
    data = cubo[:,i,j]
    datamax = data.max()
    noise_level = 0
    noise = sp.random.normal(scale=noise_level,size=len(data))
    data = data + noise
    if (i-int(ndim/2.))**2 + (j-int(ndim/2.))**2 > r**2 or 0.4*cubomax>datamax:
        return [None]

    print i,j

    m0 = sp.integrate.simps(data,nu)
    m1 = sp.integrate.simps(velocities*data, nu)/m0
    mom2[i,j] = sp.integrate.simps(data*(velocities-m1)**2, nu)*1000/m0


    datamin = data.min()
    data1 = data -data.min()
    centroid = (nu*(cubo[:,i,j]-datamin)).sum()/(cubo[:,i,j]-datamin).sum()

    i0 = datamin
    if i0==0:
        i0=1e-10
    i0_lim=(0.5*i0,1.2*i0)


    r_ij = sp.sqrt((i-128)**2 +(j-128)**2)
    rho_ij = r_ij/sp.cos(incli)

    if rho_ij<30:
        temp_0 = 70
        tlim = (10,200)
    else:
        temp_0 = 70 * (r_ij / 30.)**(-0.5)
        tlim = (10,120)
    vels = (nu-nu0)*3e5/nu0
    velg = vels[data==data.max()][0]

    noise = data[(velocities<velg-1) | (velocities>velg+1.)]
    rms = np.sqrt(sp.sum(noise**2)/float(len(noise)))

## Cont=True Fit lines considering the presence of continuum.
    if cont:
        line_model = pm.Model()
    
        with line_model:
        
            var = ['Temp','nu_c','log(N_CO)','v_turb','Continuum']

# Priors for unknown model parameters
            Temp = pm.TruncatedNormal('Temp', mu=temp_0, sd=5, lower=tlim[0], upper=tlim[1])
            nu_c = pm.TruncatedNormal('nu_c', mu=centroid, sd=abs(dnu)/10., lower=centroid-0.5*abs(dnu), upper=centroid+0.5*abs(dnu))
            NCO = pm.Uniform('log(N_CO)', lower=10, upper=24)
            v_turb = pm.Uniform('v_turb', lower=sp.sqrt(k*tlim[0]/m), upper=300000)  ## This is really broadening in velocity space, not turbulent vel.
            i_0 = pm.TruncatedNormal('Continuum', mu=i0, sd=5, lower=i0_lim[0], upper=i0_lim[1])
    
    # Expected value of outcome
            predict = intensity_continuum(nu, Temp, nu_c, alpha, 10**NCO, v_turb, angle, i_0, head, iso)
    
    # Likelihood (sampling distribution) of observations
            Y_obs = pm.Normal('Y_obs', mu=predict, sd=rms, observed=data)
            step = pm.NUTS()
    
            st = {'Temp':temp_0,
                'nu_c':centroid,
                'log(N_CO)':20,
                'v_turb':20000,
                'Continuum':i0}

            trace = pm.sample(5000,tune=1000,cores=2,step=step,start=st)

            stats = pm.summary(trace)

            mean_pars = [stats['mean'][x] for x in var]
            hpd_2_5 = [stats['hpd_2.5'][x] for x in var]
            hpd_97_5 = [stats['hpd_97.5'][x] for x in var]
            var_std = [stats['sd'][x] for x in var]
            medians_pars = [sp.median(trace[x]) for x in var]
            Map = [float(pm.find_MAP(model=line_model)[x]) for x in var]
            fit = mean_pars
            model = intensity_continuum(nu, fit[0], fit[1],alpha, 10**fit[2], fit[3], angle,fit[4], head, iso)

    else:
        line_model = pm.Model()
    
        with line_model:
        
            var = ['Temp','nu_c','log(N_CO)','v_turb']
            
            # Priors for unknown model parameters
            Temp = pm.TruncatedNormal('Temp', mu=temp_0, sd=5, lower=tlim[0], upper=tlim[1])
            nu_c = pm.TruncatedNormal('nu_c', mu=centroid, sd=abs(dnu)/10., lower=centroid-0.5*abs(dnu), upper=centroid+0.5*abs(dnu))
            NCO = pm.Uniform('log(N_CO)', lower=10, upper=24)
            v_turb = pm.Uniform('v_turb', lower=sp.sqrt(k*tlim[0]/m), upper=300000) ## This is really broadening in velocity space, not turbulent vel.
            
            # Expected value of outcome
            predict = intensity(nu, Temp, nu_c, 10**NCO, v_turb, angle, head, iso)
            
            # Likelihood (sampling distribution) of observations
            Y_obs = pm.Normal('Y_obs', mu=predict, sd=rms, observed=data)
            step = pm.NUTS()
            
            st = {'Temp':temp_0,
                'nu_c':centroid,
                'log(N_CO)':20,
                'v_turb':20000}
        
            trace = pm.sample(5000,tune=1000,cores=2,step=step,start=st)
            
            stats = pm.summary(trace)
            
            mean_pars = [stats['mean'][x] for x in var]
            hpd_2_5 = [stats['hpd_2.5'][x] for x in var]
            hpd_97_5 = [stats['hpd_97.5'][x] for x in var]
            var_std = [stats['sd'][x] for x in var]
            medians_pars = [sp.median(trace[x]) for x in var]
            Map = [float(pm.find_MAP(model=line_model)[x]) for x in var]
            fit = mean_pars
            model = intensity(nu, fit[0], fit[1], 10**fit[2], fit[3], angle, head, iso)

    Temperature[i,j,:] = sp.array([fit[0],var_std[0],medians_pars[0],Map[0],hpd_2_5[0],hpd_97_5[0]])
    Denscol[i,j,:] = sp.array([10**fit[2],10**fit[2]*sp.log(10)*var_std[2],10**medians_pars[2],10**Map[2],10**hpd_2_5[2],10**hpd_97_5[2]])
    Turbvel[i,j,:] = sp.sqrt(((sp.array([fit[3],var_std[3],medians_pars[3],Map[3],hpd_2_5[3],hpd_97_5[3]]))**2 - k*fit[0]/m))*1e-5
    aux_nu = sp.array([fit[1],var_std[1],medians_pars[1],Map[1],hpd_2_5[1],hpd_97_5[1]])
    vel_cen[i,j,:] = sp.around(((nu0-aux_nu)*c*1e-5/nu0))
    return [i,j,Temperature[i,j,:], Denscol[i,j,:], Turbvel[i,j,:],vel_cen[i,j,:]]
Example #35
#  Isotopologue image and centroid map
cubo, head = datafits(image)

dnu = head['CDELT3']
len_nu = head['NAXIS3']
nui = head['CRVAL3']- head['CRPIX3']*dnu
nuf = nui + (len_nu-1)*dnu

nu = sp.linspace(nui, nuf, len_nu)
nu0 = sp.mean(nu)

#Gaussian Convolution
if False:
    resol = abs(head['CDELT1'])*3600
    stdev = Beam / (2 * sp.sqrt (2 * sp.log(2)))
    stdev /= resol
    x_size = int(8*stdev + 1.)

    print 'convolution with gaussian'
    print '\tbeam '+str(Beam)+' arcsec'
    print '\tbeam '+str(stdev)+' pixels'

    # circular Gaussian
    beam = Gaussian2DKernel (stddev = stdev, x_size = x_size, y_size = x_size,
                             model ='integrate')
    smooth =  np.zeros((80, 256, 256))
    for k in range(80):
        smooth[k, :,:] += convolve_fft(cubo[k,:,:], beam)
    print '\tsmoothed'
    cubo = smooth
Example #36
def psi_n(n, x):
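    # Assumption: psi (coefficient matrix) and L (box length) are module-level globals.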
    psi0 = 0
    for m in range(100):
        psi0 += sqrt(2 / L) * psi[n][m] * sin(pi * (m + 1) * x / L)
    return psi0
Example #37
def main():

    (sensdict,simparams) = readconfigfile(inifile)
    simdtype = simparams['dtype']
    sumrule = simparams['SUMRULE']
    npts = simparams['numpoints']
    amb_dict = simparams['amb_dict']
    # for spectrum
    ISS2 = ISRSpectrum(centerFrequency = 440.2*1e6, bMag = 0.4e-4, nspec=npts, sampfreq=sensdict['fs'],dFlag=True)
    ti = 2e3
    te = 2e3
    Ne = 1e11
    Ni = 1e11


    datablock90 = sp.array([[Ni,ti],[Ne,te]])
    species = simparams['species']

    (omega,specorig,rcs) = ISS2.getspecsep(datablock90, species,rcsflag = True)

    cur_filt = sp.sqrt(scfft.ifftshift(specorig*npts*npts*rcs/specorig.sum()))
    #for data
    Nrep = 10000
    pulse = sp.ones(14)
    lp_pnts = len(pulse)
    N_samps = 100
    minrg = -sumrule[0].min()
    maxrg = N_samps+lp_pnts-sumrule[1].max()
    Nrng2 = maxrg-minrg
    out_data = sp.zeros((Nrep,N_samps+lp_pnts),dtype=simdtype)
    samp_num = sp.arange(lp_pnts)
    for isamp in range(N_samps):
        cur_pnts = samp_num+isamp
        cur_pulse_data = MakePulseDataRep(pulse,cur_filt,rep=Nrep)
        out_data[:,cur_pnts] = cur_pulse_data+out_data[:,cur_pnts]

    lagsData = CenteredLagProduct(out_data,numtype=simdtype,pulse =pulse)
    lagsData=lagsData/Nrep # divide out the number of pulses summed
    Nlags = lagsData.shape[-1]
    lagsDatasum = sp.zeros((Nrng2,Nlags),dtype=lagsData.dtype)
    for irngnew,irng in enumerate(sp.arange(minrg,maxrg)):
        for ilag in range(Nlags):
            lagsDatasum[irngnew,ilag] = lagsData[irng+sumrule[0,ilag]:irng+sumrule[1,ilag]+1,ilag].mean(axis=0)
    lagsDatasum=lagsDatasum/lp_pnts # divide out the pulse length
    (tau,acf) = spect2acf(omega,specorig)

    # apply ambiguity function
    tauint = amb_dict['Delay']
    acfinterp = sp.zeros(len(tauint),dtype=simdtype)

    acfinterp.real =spinterp.interp1d(tau,acf.real,bounds_error=0)(tauint)
    acfinterp.imag =spinterp.interp1d(tau,acf.imag,bounds_error=0)(tauint)
    # Apply the lag ambiguity function to the data
    guess_acf = sp.zeros(amb_dict['Wlag'].shape[0],dtype=sp.complex128)
    for i in range(amb_dict['Wlag'].shape[0]):
        guess_acf[i] = sp.sum(acfinterp*amb_dict['Wlag'][i])

#    pdb.set_trace()
    guess_acf = guess_acf*rcs/guess_acf[0].real

    # fit to spectrums
    spec_interm = scfft.fftshift(scfft.fft(guess_acf,n=npts))
    spec_final = spec_interm.real
    allspecs = scfft.fftshift(scfft.fft(lagsDatasum,n=len(spec_final),axis=-1),axes=-1)
#    allspecs = scfft.fftshift(scfft.fft(lagsDatasum,n=npts,axis=-1),axes=-1)
    fig = plt.figure()
    plt.plot(omega,spec_final.real,label='In',linewidth=5)
    plt.hold(True)
    plt.plot(omega,allspecs[40].real,label='Out',linewidth=5)
    plt.axis((omega.min(),omega.max(),0.0,2e11))
    plt.show(False)
Example #38
def main():

    fm = fmtest()

    tstart = time.time()
    fm.run()
    tend = time.time()

    if 1:
        fig1 = pylab.figure(1, figsize=(12, 10), facecolor="w")
        fig2 = pylab.figure(2, figsize=(12, 10), facecolor="w")
        fig3 = pylab.figure(3, figsize=(12, 10), facecolor="w")

        Ns = 10000
        Ne = 100000

        fftlen = 8192
        winfunc = scipy.blackman

        # Plot transmitted signal
        fs = fm._if_rate

        d = fm.snk_tx.data()[Ns:Ns + Ne]
        sp1_f = fig1.add_subplot(2, 1, 1)

        X, freq = sp1_f.psd(d,
                            NFFT=fftlen,
                            noverlap=fftlen / 4,
                            Fs=fs,
                            window=lambda d: d * winfunc(fftlen),
                            visible=False)
        X_in = 10.0 * scipy.log10(abs(fftpack.fftshift(X)))
        f_in = scipy.arange(-fs / 2.0, fs / 2.0, fs / float(X_in.size))
        p1_f = sp1_f.plot(f_in, X_in, "b")
        sp1_f.set_xlim([min(f_in), max(f_in) + 1])
        sp1_f.set_ylim([-120.0, 20.0])

        sp1_f.set_title("Input Signal", weight="bold")
        sp1_f.set_xlabel("Frequency (Hz)")
        sp1_f.set_ylabel("Power (dBW)")

        Ts = 1.0 / fs
        Tmax = len(d) * Ts

        t_in = scipy.arange(0, Tmax, Ts)
        x_in = scipy.array(d)
        sp1_t = fig1.add_subplot(2, 1, 2)
        p1_t = sp1_t.plot(t_in, x_in.real, "b-o")
        #p1_t = sp1_t.plot(t_in, x_in.imag, "r-o")
        sp1_t.set_ylim([-5, 5])

        # Set up the number of rows and columns for plotting the subfigures
        Ncols = int(scipy.floor(scipy.sqrt(fm.num_rx_channels())))
        Nrows = int(scipy.floor(fm.num_rx_channels() / Ncols))
        if (fm.num_rx_channels() % Ncols != 0):
            Nrows += 1

        # Plot each of the channels outputs. Frequencies on Figure 2 and
        # time signals on Figure 3
        fs_o = fm._audio_rate
        for i in xrange(len(fm.snks)):
            # remove issues with the transients at the beginning
            # also remove some corruption at the end of the stream
            #    this is a bug, probably due to the corner cases
            d = fm.snks[i].data()[Ns:Ne]

            sp2_f = fig2.add_subplot(Nrows, Ncols, 1 + i)
            X, freq = sp2_f.psd(d,
                                NFFT=fftlen,
                                noverlap=fftlen / 4,
                                Fs=fs_o,
                                window=lambda d: d * winfunc(fftlen),
                                visible=False)
            #X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
            X_o = 10.0 * scipy.log10(abs(X))
            #f_o = scipy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size))
            f_o = scipy.arange(0, fs_o / 2.0, fs_o / 2.0 / float(X_o.size))
            p2_f = sp2_f.plot(f_o, X_o, "b")
            sp2_f.set_xlim([min(f_o), max(f_o) + 0.1])
            sp2_f.set_ylim([-120.0, 20.0])
            sp2_f.grid(True)

            sp2_f.set_title(("Channel %d" % i), weight="bold")
            sp2_f.set_xlabel("Frequency (kHz)")
            sp2_f.set_ylabel("Power (dBW)")

            Ts = 1.0 / fs_o
            Tmax = len(d) * Ts
            t_o = scipy.arange(0, Tmax, Ts)

            x_t = scipy.array(d)
            sp2_t = fig3.add_subplot(Nrows, Ncols, 1 + i)
            p2_t = sp2_t.plot(t_o, x_t.real, "b")
            p2_t = sp2_t.plot(t_o, x_t.imag, "r")
            sp2_t.set_xlim([min(t_o), max(t_o) + 1])
            sp2_t.set_ylim([-1, 1])

            sp2_t.set_xlabel("Time (s)")
            sp2_t.set_ylabel("Amplitude")

        pylab.show()
Example #39
def l1qc_newton(x0,
                u0,
                A,
                b,
                epsilon,
                tau,
                newtontol=np.float64(1e-3),
                newtonmaxiter=50,
                cgtol=np.float64(1e-8),
                cgmaxiter=200):
    # line search params
    f64one = np.float64(1.)
    alpha = np.float64(0.01)
    beta = np.float64(0.5)
    AtA = np.float64(A.T.dot(A))
    # initial point
    x = np.float64(x0)
    u = np.float64(u0)
    r = np.float64(A.dot(x) - b)
    fu1 = np.float64(x - u)
    fu2 = np.float64(-x - u)
    fe = np.float64(0.5) * (r.T.dot(r) - epsilon**2)
    f = np.float64(
        sum(u) - (f64one / tau) * (sum(log(-fu1)) + sum(log(-fu2)) + log(-fe)))
    niter = 0
    done = False
    while not done:
        atr = A.T.dot(r)
        ntgz = f64one / fu1 - f64one / fu2 + f64one / fe * atr
        ntgu = -tau - f64one / fu1 - f64one / fu2
        gradf = -(f64one / tau) * np.hstack((ntgz, ntgu))

        sig11 = f64one / fu1**2 + f64one / fu2**2
        sig12 = -f64one / fu1**2 + f64one / fu2**2
        sigx = sig11 - np.float64(sig12**2) / sig11

        w1p = ntgz - np.float64(sig12) / sig11 * ntgu
        H11p = np.diag(sigx) - (f64one / fe) * AtA + (f64one /
                                                      fe)**2 * atr.dot(atr.T)
        dx = np.linalg.solve(H11p, w1p)
        Adx = A.dot(dx)
        du = (f64one / sig11) * ntgu - (np.float64(sig12) / sig11) * dx
        # minimum step size that stays in the interior
        ifu1 = [i for i in xrange(len(dx)) if dx[i] - du[i] > 0]
        ifu2 = [i for i in xrange(len(dx)) if -dx[i] - du[i] > 0]
        aqe = Adx.T.dot(Adx)
        bqe = np.float64(2) * r.T.dot(Adx)
        cqe = r.T.dot(r) - epsilon**2
        temp = -fu1[ifu1] / (dx[ifu1] - du[ifu1])
        smax = np.float64(1)
        if len(temp) > 0:
            smax = min(smax, np.min(temp))
        temp = -fu2[ifu2] / (-dx[ifu2] - du[ifu2])
        if len(temp) > 0:
            smax = min(smax, np.min(temp))
        smax = min(smax, (-bqe + sqrt(bqe**2 - 4. * aqe * cqe)) / (2. * aqe))
        s = np.float64(0.99) * smax

        # backtracking line search
        suffdec = False
        backiter = 0
        while not suffdec:
            xp = x + s * dx
            up = u + s * du
            rp = r + s * Adx
            fu1p = xp - up
            fu2p = -xp - up
            fep = f64one / 2 * (rp.T.dot(rp) - epsilon**2)
            fp = sum(up) - (f64one / tau) * (sum(log(-fu1p)) +
                                             sum(log(-fu2p)) + log(-fep))
            flin = f + alpha * s * (gradf.T.dot(np.hstack((dx, du))))
            # print fep,fp,flin
            suffdec = (fp <= flin)
            s = beta * s
            backiter = backiter + 1
            if backiter > 48:
                # print "Stuck on backtracking line search, returning previous iterate."
                xp = x
                up = u
                break
        # set up for next iteration
        x = xp
        u = up
        r = rp
        fu1 = fu1p
        fu2 = fu2p
        fe = fep
        f = fp
        lambda2 = np.float64(-(gradf.T.dot(np.hstack((dx, du)))))
        stepsize = np.float64(s * norm(np.hstack((dx, du))))
        niter = niter + 1
        done = (lambda2 / 2 < newtontol) or (niter >= newtonmaxiter)
        # print "Newton iter = %d, Functional = %8.3f, Newton decrement = %8.3f, Stepsize = %8.3e"%(niter, f, lambda2/2, stepsize)
    return xp, up, niter
Example #40
def Mittelwert(x):  # Function definition: def name(arguments): indented block follows
    mw = sp.mean(x)
    N = len(x)
    f_mw = sp.sqrt((sp.sum((x - mw)**2)) / (N * (N - 1)))
    return mw, f_mw
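f_mw is the standard error of the mean, s/sqrt(N). Usage sketch (illustrative data; numpy arrays work in place of the sp alias):

import numpy as np

x = np.array([2.1, 1.9, 2.4, 2.0, 2.2])
mw, f_mw = Mittelwert(x)
print(mw, f_mw)    # mean and its standard error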
Example #41
def directed_laplacian_matrix(G,
                              nodelist=None,
                              weight='weight',
                              walk_type=None,
                              alpha=0.95):
    r"""Return the directed Laplacian matrix of G.

    The graph directed Laplacian is the matrix

    .. math::

        L = I - (\Phi^{1/2} P \Phi^{-1/2} + \Phi^{-1/2} P^T \Phi^{1/2} ) / 2

    where `I` is the identity matrix, `P` is the transition matrix of the
    graph, and `\Phi` a matrix with the Perron vector of `P` in the diagonal and
    zeros elsewhere.

    Depending on the value of walk_type, `P` can be the transition matrix
    induced by a random walk, a lazy random walk, or a random walk with
    teleportation (PageRank).

    Parameters
    ----------
    G : DiGraph
       A NetworkX graph

    nodelist : list, optional
       The rows and columns are ordered according to the nodes in nodelist.
       If nodelist is None, then the ordering is produced by G.nodes().

    weight : string or None, optional (default='weight')
       The edge data key used to compute each value in the matrix.
       If None, then each edge has weight 1.

    walk_type : string or None, optional (default=None)
       If None, `P` is selected depending on the properties of the
       graph. Otherwise it must be one of 'random', 'lazy', or 'pagerank'.

    alpha : real
       (1 - alpha) is the teleportation probability used with pagerank

    Returns
    -------
    L : NumPy array
      Normalized Laplacian of G.

    Raises
    ------
    NetworkXError
        If NumPy cannot be imported

    NetworkXNotImplemented
        If G is not a DiGraph

    Notes
    -----
    Only implemented for DiGraphs

    See Also
    --------
    laplacian_matrix

    References
    ----------
    .. [1] Fan Chung (2005).
       Laplacians and the Cheeger inequality for directed graphs.
       Annals of Combinatorics, 9(1), 2005
    """
    import scipy as sp
    from scipy.sparse import identity, spdiags, linalg
    if walk_type is None:
        if nx.is_strongly_connected(G):
            if nx.is_aperiodic(G):
                walk_type = "random"
            else:
                walk_type = "lazy"
        else:
            walk_type = "pagerank"

    M = nx.to_scipy_sparse_matrix(G,
                                  nodelist=nodelist,
                                  weight=weight,
                                  dtype=float)
    n, m = M.shape
    if walk_type in ["random", "lazy"]:
        DI = spdiags(1.0 / sp.array(M.sum(axis=1).flat), [0], n, n)
        if walk_type == "random":
            P = DI * M
        else:
            I = identity(n)
            P = (I + DI * M) / 2.0

    elif walk_type == "pagerank":
        if not (0 < alpha < 1):
            raise nx.NetworkXError('alpha must be between 0 and 1')
        # this is using a dense representation
        M = M.todense()
        # add constant to dangling nodes' row
        dangling = sp.where(M.sum(axis=1) == 0)
        for d in dangling[0]:
            M[d] = 1.0 / n
        # normalize
        M = M / M.sum(axis=1)
        P = alpha * M + (1 - alpha) / n
    else:
        raise nx.NetworkXError("walk_type must be random, lazy, or pagerank")

    evals, evecs = linalg.eigs(P.T, k=1)
    v = evecs.flatten().real
    p = v / v.sum()
    sqrtp = sp.sqrt(p)
    Q = spdiags(sqrtp, [0], n, n) * P * spdiags(1.0 / sqrtp, [0], n, n)
    I = sp.identity(len(G))

    return I - (Q + Q.T) / 2.0
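# A minimal usage sketch (assumption: a NetworkX build that ships this
# function; the small DiGraph below is made up for illustration):
import networkx as nx

Gd = nx.DiGraph([(0, 1), (1, 2), (2, 0), (2, 3), (3, 2)])
Ld = nx.directed_laplacian_matrix(Gd, walk_type='pagerank', alpha=0.95)
print(Ld.shape)  # (4, 4); the result is symmetric by construction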
Example #42
# t_value = (mu - mu0) / se, where mu0 is the hypothesized mean (50 below)

# Sample mean
mu = sp.mean(junk_food)
mu

# Degrees of freedom
df = len(junk_food) - 1
df

# Sample standard deviation
sigma = sp.std(junk_food, ddof=1)
sigma

# Standard error
se = sigma / sp.sqrt(len(junk_food))
se

# t-value
# --- theoretical value
t_value = (mu - 50) / se
t_value

# 3. Implementing the t-test: computing the p-value --------------------------------------------------------------

# p-value
alpha = stats.t.cdf(t_value, df=df)
(1 - alpha) * 2

# t-test via a library function
# --- the t-value and p-value are output directly
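# A plausible completion of that call (assumption: junk_food is the sample
# array loaded earlier in the original script):
stats.ttest_1samp(junk_food, popmean=50)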
Example #43
def extract(data, varimg, width=WIDTH, nsig=NSIG, noise=NOISE):
    WIDTH = width
    NSIG = nsig
    NOISE = noise

    data = data.copy()
    spectra = []

    # Replace nan with zero
    data[scipy.isnan(data)] = 0.
    varimg[scipy.isnan(varimg)] = 0.

    # Create model of real flux. We ignore the slit ends, which may have
    #  artifacts from the resampling.
    slit = data[:, 8:-8].astype(scipy.float32)
    var = varimg[:, 8:-8]

    # OK...so negative-variance also isn't good; set these pixels to zero
    var[var < 0] = 0

    # Create noise models
    sigmaimg = slit / scipy.sqrt(var)
    highpix = scipy.where(sigmaimg > 1.5, sigmaimg, 0.)
    source_columns = highpix.sum(axis=0)

    # MASKING DISABLED (this would take only columns with lotsa flux...)
    #	mask = scipy.where(source_columns>4.,1.,scipy.nan)
    mask = source_columns * 0.

    # Condition 1, dealing with bad pixels
    if (var == 0).any():
        cond = var == 0
        var[cond] = scipy.nan
        slit[cond] = scipy.nan
        mask = scipy.where(cond, 0, 1)
        flux = scipy.nansum(slit / var, axis=1) / scipy.nansum(1. / var,
                                                               axis=1)
        noise = scipy.sqrt(scipy.nansum(var, axis=1)) / mask.sum(axis=1)
    # Condition 2, no masking
    elif scipy.nansum(mask) == 0:
        flux = (slit / var).sum(axis=1) / (1. / var).sum(axis=1)
        noise = scipy.sqrt(var.sum(axis=1)) / mask.size
    # Condition 3, masking
    else:
        fluxmodel = slit * mask
        noisemodel = var * mask

        noise = scipy.sqrt(scipy.nansum(noisemodel,
                                        axis=1)) / scipy.nansum(mask)
        flux = stats.stats.nanmean(fluxmodel, axis=1)

    # A smooth S/N estimate for the slit
#	sig2noise = ndimage.gaussian_filter1d(flux,1)/noise

    row = scipy.arange(flux.size)
    model = flux.copy()
    nspec = 10  # Maximum number of attempts
    while nspec:
        nspec -= 1

        # Fit a gaussian around the peak of the S/N model
        start = model.argmax() - WIDTH
        end = model.argmax() + WIDTH + 1
        if start < 0:
            start = 0
        if end > model.size:
            end = model.size

        fitarr = model[start:end]
        p = scipy.zeros(4)
        p[1] = fitarr.max()
        p[2] = fitarr.argmax()
        p[3] = 2.

        fit, val = special_functions.ngaussfit(fitarr, p)
        chi2 = val / (fitarr.size - 3)
        fit[2] += start

        # If the centroid doesn't lie on the slit, use the edge pixel
        midcol = int(fit[2].round())
        if midcol >= flux.size:
            midcol = flux.size - 1
        elif midcol < 0:
            midcol = 0
        # Require a reasonable S/N and width
        if fit[3] > fitarr.size / 2. or fit[3] < 0.85:
            break
        elif fit[0] > 0 and fit[1] < NOISE * noise[midcol]:
            break
        elif fit[0] < 0 and fit[1] - fit[0] < NOISE * noise[midcol]:
            break
        else:
            fit[1] += fit[0]
            fit[0] = 0.
            # Subtract away a model of the source
            source = special_functions.ngauss(row, fit)
            model -= scipy.where(source > noise, source, 0.)

            # Skip residuals!
            if fit[2] < flux.size and fit[1] < scipy.sqrt(flux[int(fit[2])]):
                continue
            fit[1] = 1.
            weight = special_functions.ngauss(row, fit)
            cond = (row > fit[2] - fit[3] * NSIG) & (row <
                                                     fit[2] + fit[3] * NSIG)
            weight = scipy.where(cond, weight, 0)
            weight /= weight.sum()
            spec = weight * data.T
            spec = spec.sum(axis=1)
            varspec = weight * varimg.T
            varspec = varspec.sum(axis=1)
            spec[varspec == 0] = 0.
            smooth = signal.wiener(spec, FILTSIZE, varspec)
            smooth[scipy.isnan(smooth)] = 0.
            spectra.append([fit, spec, smooth, varspec])
    return spectra
Example #44
def normalized_laplacian_matrix(G, nodelist=None, weight='weight'):
    r"""Return the normalized Laplacian matrix of G.

    The normalized graph Laplacian is the matrix

    .. math::

        N = D^{-1/2} L D^{-1/2}

    where `L` is the graph Laplacian and `D` is the diagonal matrix of
    node degrees.

    Parameters
    ----------
    G : graph
       A NetworkX graph

    nodelist : list, optional
       The rows and columns are ordered according to the nodes in nodelist.
       If nodelist is None, then the ordering is produced by G.nodes().

    weight : string or None, optional (default='weight')
       The edge data key used to compute each value in the matrix.
       If None, then each edge has weight 1.

    Returns
    -------
    N : NumPy matrix
      The normalized Laplacian matrix of G.

    Notes
    -----
    For MultiGraph/MultiDiGraph, the edge weights are summed.
    See to_numpy_matrix for other options.

    If the Graph contains selfloops, D is defined as diag(sum(A,1)), where A is
    the adjacency matrix [2]_.

    See Also
    --------
    laplacian_matrix

    References
    ----------
    .. [1] Fan Chung-Graham, Spectral Graph Theory,
       CBMS Regional Conference Series in Mathematics, Number 92, 1997.
    .. [2] Steve Butler, Interlacing For Weighted Graphs Using The Normalized
       Laplacian, Electronic Journal of Linear Algebra, Volume 16, pp. 90-98,
       March 2007.
    """
    import scipy
    import scipy.sparse
    if nodelist is None:
        nodelist = G.nodes()
    A = nx.to_scipy_sparse_matrix(G,
                                  nodelist=nodelist,
                                  weight=weight,
                                  format='csr')
    n, m = A.shape
    diags = A.sum(axis=1).flatten()
    D = scipy.sparse.spdiags(diags, [0], m, n, format='csr')
    L = D - A
    with scipy.errstate(divide='ignore'):
        diags_sqrt = 1.0 / scipy.sqrt(diags)
    diags_sqrt[scipy.isinf(diags_sqrt)] = 0
    DH = scipy.sparse.spdiags(diags_sqrt, [0], m, n, format='csr')
    return DH.dot(L.dot(DH))
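# A minimal usage sketch (standard NetworkX API, small made-up graph):
import networkx as nx

Gp = nx.path_graph(4)
Np_ = nx.normalized_laplacian_matrix(Gp)
print(Np_.todense())  # 4x4 normalized Laplacian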
Example #45
    print(count1, end=' ')

nSub = sub_data.shape[2]

cat_data = sp.zeros((nSub * sub_data.shape[0], sub_data.shape[1]))

for ind in range(nSub):
    sub_data[:, :, ind] = rot_sub_data(ref=sub_data[:, :, 0],
                                       sub=sub_data[:, :, ind])
    #    cat_data[sub_data.shape[0]*ind:sub_data.shape[0]*(ind+1),:] = sub_data[:,:,ind]
    print(ind, sub_data.shape, cat_data.shape)

SC = DBSCAN(
    eps=.1
)  #,min_samples=50,leaf_size=50)# min_cluster_size=5) #KMeans(n_clusters=nClusters,random_state=5324)
lab_sub = sp.zeros((sub_data.shape[0], nSub))
for ind in [0]:  # range(nSub):
    dat1 = (0.000001 + sub_data[:, :, ind]) / sp.sum(
        0.000001 + sp.sqrt(sub_data[:, :, ind] * sub_data[:, :, ind]),
        axis=1)[:, None]
    met = sp.absolute(sp.arccos(sp.corrcoef(dat1)))
    lab_sub[:, ind] = SC.fit_predict(dat1)
    print(ind, sp.amax(lab_sub))
#labs_all = SC.fit_predict(cat_data)

#lab_sub=labs_all.reshape((sub_data.shape[0],nSub),order='F')
sp.savez_compressed('labs_all_data1_rot_individual_nclusters_30_HDBSCAN_sub0',
                    lab_sub=lab_sub,
                    cat_data=cat_data,
                    lst=lst)
Example #46
            H_tau.set_xlabel('tau')
            H_tau.set_ylabel('H')
            H_tau.set_title('Imaginary time evolution: Energy')
            M_tau = fig2.add_subplot(111)
            M_tau.set_xlabel('tau')
            M_tau.set_ylabel('M')
            M_tau.set_title('Imaginary time evolution: Magnetization')

            H_tau.plot(tau, H)
            M_tau.plot(tau, M)

        plt.figure()
        ex_p = sp.array(ex_p).ravel()
        ex_ev = sp.array(ex_ev).ravel()
        ex_ev_nt = sp.array(ex_ev_nt).ravel()
        plt.plot(ex_p, ex_ev, 'bo', label='top. trivial')
        if top_non_triv:
            plt.plot(ex_p, ex_ev_nt, 'ro', label='top. non-trivial')

        elem_ex = lambda p: 2 * J * sp.sqrt(1 + h**2 / J**2 - 2 * h / J * sp.cos(p))
        p = sp.linspace(0, sp.pi, num=100)
        plt.plot(p, elem_ex(p), 'c-', label='exact elem. excitation')

        plt.title('Excitation spectrum')
        plt.xlabel('p')
        plt.ylabel('dE')
        plt.ylim(0, max(ex_ev.max(), ex_ev_nt.max()) * 1.1)
        plt.legend()

        plt.show()
Example #47
 def Spot_size(self, z):
     """Returns the spot size of the beam at axial position z."""
     return self.waist * sp.sqrt(1 + (z / self.RL)**2)
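# A standalone sketch of the same relation, w(z) = w0*sqrt(1 + (z/z_R)**2),
# with made-up numbers (w0 = self.waist, z_R = self.RL):
import scipy as sp
w0, zR = 1.0e-3, 2.0
print(w0 * sp.sqrt(1 + (2.0 / zR)**2))  # w0*sqrt(2) at z == z_R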
Example #48
def PlotHist(field, imfile=None):
    """ field is a HistMaker object """
    H = field.H
    # Scale and flip if necessary
    vmin = field.varea[0]
    if field.varea[1] is None: vmax = np.ma.max(H)
    else: vmax = field.varea[1]
    if field.scale == 'sqrt':
        H = field.H = sc.sqrt(H)  # scale the array that is actually plotted
        vmax = sc.sqrt(vmax)
    elif field.scale == 'log':
        H = field.H = sc.log10(H)
        vmax = sc.log10(vmax)
    if field.xflip == 1: H = H[:, ::-1]
    if field.yflip == 1: H = H[::-1, :]
    # Plot the image
    if field.cmap == 'color': cmap = spectral_wb
    elif field.cmap == 'color_c': cmap = spec_center
    elif field.cmap == 'bw': cmap = 'gist_yarg'
    else: cmap = field.cmap
    plt.figure()
    plt.imshow(H,
               cmap=cmap,
               norm=None,
               aspect=None,
               interpolation='nearest',
               alpha=None,
               vmin=vmin,
               vmax=vmax,
               origin='lower',
               extent=None)
    cbar = plt.colorbar()  #May need to adjust these ticks...
    # Add xTicks and labels
    xlocs, xlabels, ylocs, ylabels = [], [], [], []
    if field.ticks[0] is None:
        xpos = np.arange(field.xmin, field.xmax + 0.000001,
                         ((field.xmax - field.xmin) / 10.0))
    else:
        xpos = field.ticks[0]
    for pos in xpos:
        xlocs.append((pos - field.xmin) / field.xsize)
        xlabels.append("${0:.1f}$".format(pos))
    if field.xflip == 1:
        plt.xticks(xlocs, xlabels[::-1], rotation=45, fontsize=14)
    else:
        plt.xticks(xlocs, xlabels, rotation=45, fontsize=14)
    # Add yTicks and labels
    if field.ticks[1] is None:
        ypos = np.arange(field.ymin, field.ymax + 0.000001,
                         ((field.ymax - field.ymin) / 10.0))
    else:
        ypos = field.ticks[1]
    for pos in ypos:
        ylocs.append((pos - field.ymin) / field.ysize)
        ylabels.append("${0:.1f}$".format(pos))
    if field.yflip == 1:
        plt.yticks(ylocs, ylabels[::-1], rotation=0, fontsize=14)
    else:
        plt.yticks(ylocs, ylabels, rotation=0, fontsize=14)
    # Add axes labels
    if field.labels[0] is not None: plt.xlabel(field.labels[0])
    if field.labels[1] is not None: plt.ylabel(field.labels[1])
    # Save file, if outfile declared
    if imfile is not None: plt.savefig(imfile)
    else: plt.show()
    plt.close('all')
Example #49
    def __init__(self,
                 f,
                 mu,
                 amat,
                 eta_mu=1.0,
                 eta_sigma=None,
                 eta_bmat=None,
                 npop=None,
                 use_fshape=True,
                 use_adasam=False,
                 patience=100,
                 n_jobs=1):
        self.f = f
        self.mu = mu
        self.eta_mu = eta_mu
        self.use_adasam = use_adasam
        self.n_jobs = n_jobs

        dim = len(mu)
        sigma = abs(det(amat))**(1.0 / dim)
        bmat = amat * (1.0 / sigma)
        self.dim = dim
        self.sigma = sigma
        self.bmat = bmat

        # default population size and learning rates
        npop = int(4 + 3 * log(dim)) if npop is None else npop
        eta_sigma = 3 * (3 + log(dim)) * (
            1.0 / (5 * dim * sqrt(dim))) if eta_sigma is None else eta_sigma
        eta_bmat = 3 * (3 + log(dim)) * (
            1.0 / (5 * dim * sqrt(dim))) if eta_bmat is None else eta_bmat
        self.npop = npop
        self.eta_sigma = eta_sigma
        self.eta_bmat = eta_bmat

        # compute utilities if using fitness shaping
        if use_fshape:
            a = log(1 + 0.5 * npop)
            utilities = array([max(0, a - log(k)) for k in range(1, npop + 1)])
            utilities /= sum(utilities)
            utilities -= 1.0 / npop  # broadcast
            utilities = utilities[::-1]  # ascending order
        else:
            utilities = None
        self.use_fshape = use_fshape
        self.utilities = utilities

        # stuff for adasam
        self.eta_sigma_init = eta_sigma
        self.sigma_old = None

        # logging
        self.fitness_best = None
        self.mu_best = None
        self.done = False
        self.counter = 0
        self.patience = patience
        self.history = {'eta_sigma': [], 'sigma': [], 'fitness': []}

        # do not use these when hill-climbing
        if npop == 1:
            self.use_fshape = False
            self.use_adasam = False
Example #50
    if r == 'petit':
        rev = False
    else:
        rev = True

    rankings = []
    for i in range(n):
        row = [col[i] for col in args]
        row_sort = sorted(row, reverse=rev)
        rankings.append([
            row_sort.index(v) + 1 + (row_sort.count(v) - 1) / 2. for v in row
        ])

    rankings_avg = [sp.mean([case[j] for case in rankings]) for j in range(k)]
    rankings_cmp = [r / sp.sqrt(k * (k + 1) / (6. * n)) for r in rankings_avg]

    chi2 = ((12 * n) / float(
        (k * (k + 1)))) * ((sp.sum([r**2 for r in rankings_avg])) -
                           ((k * (k + 1)**2) / float(4)))
    iman_davenport = ((n - 1) * chi2) / float((n * (k - 1) - chi2))

    p_value = 1 - st.f.cdf(iman_davenport, k - 1, (k - 1) * (n - 1))

    return iman_davenport, p_value, rankings_avg, rankings_cmp


def wilco_approche_adapt_vs_nonadapt(df_metrics_adapt, df_metrics_nonadapt,
                                     metric, timeParams, datasets, methods):
    cols = ['method', 'timeParam', 'null_hypothesis_rejected', 'z']
    print('METS : ', methods)
Example #51
 def correctedFWHM(self, FWHM_PSF):
     return scipy.sqrt(self.FWHMnm()**2 - FWHM_PSF**2)  # PSF width removed in quadrature
Example #52
 def __init__(self, limit):
     self.primes = Primes.MakePrimeList(int(sp.sqrt(2*limit)))
     self.memo   = {}
Example #53
import collections
from collections import Counter

import matplotlib.pyplot as plt
import scipy as sp
from scipy import stats

in_degrees = G.in_degree()
in_h = Counter(in_degrees.values())
in_dic = collections.OrderedDict(sorted(in_h.items()))
in_hist = list(in_dic.values())
in_values = list(in_dic.keys())

out_degrees = G.out_degree()
out_h = Counter(out_degrees.values())
out_dic = collections.OrderedDict(sorted(out_h.items()))
out_hist = list(out_dic.values())
out_values = list(out_dic.keys())

mu = 2.17
sigma = sp.sqrt(mu)
mu_plus_sigma = mu + sigma
x = range(0, 10)
prob = stats.poisson.pmf(x, mu) * 4426

plt.figure(figsize=(12, 8))
plt.grid(True)
plt.loglog(out_values, out_hist, 'ro-')  # out-degree
plt.loglog(in_values, in_hist, 'bv-')  # in-degree
plt.plot(x, prob, "o-", color="black")
plt.legend(['In-degree', 'Out-degree', 'Poisson'])
plt.xlabel('Degree')
plt.ylabel('Number of nodes')
plt.title('Manhattan Street Network')
plt.xlim([0, 2 * 10**2])
plt.show()
Example #54
def extract_pore_network(im, dt=None, voxel_size=1):
    r"""
    Analyzes an image that has been partitioned into pore regions and extracts
    the pore and throat geometry as well as network connectivity.

    Parameters
    ----------
    im : ND-array
        An image of the pore space partitioned into individual pore regions.
        Note that this image must have zeros indicating the solid phase.

    dt : ND-array
        The distance transform of the pore space.  If not given it will be
        calculated, but it can save time to provide one if available.

    voxel_size : scalar
        The resolution of the image, expressed as the length of one side of a
        voxel, so the volume of a voxel would be **voxel_size**-cubed.  The
        default is 1, which is useful when overlaying the PNM on the original
        image since the scale of the image is always 1 unit length per voxel.

    Returns
    -------
    A dictionary containing all the pore and throat size data, as well as the
    network topological information.  The dictionary names use the OpenPNM
    convention (i.e. 'pore.coords', 'throat.conns') so it may be converted
    directly to an OpenPNM network object using the ``update`` command.
    """
    print('_' * 60)
    print('Extracting pore and throat information from image')
    from skimage.morphology import disk, square, ball, cube
    if im.ndim == 2:
        cube = square
        ball = disk

    if not sp.any(im == 0):
        raise Exception('The received image has no solid phase (0\'s)')

    if dt is None:
        dt = spim.distance_transform_edt(im > 0)
        dt = spim.gaussian_filter(input=dt, sigma=0.5)

    # Get 'slices' into im for each pore region
    slices = spim.find_objects(im)

    # Initialize arrays
    Ps = sp.arange(1, sp.amax(im) + 1)
    Np = sp.size(Ps)
    p_coords = sp.zeros((Np, im.ndim), dtype=float)
    p_volume = sp.zeros((Np, ), dtype=float)
    p_dia_local = sp.zeros((Np, ), dtype=float)
    p_dia_global = sp.zeros((Np, ), dtype=float)
    p_label = sp.zeros((Np, ), dtype=int)
    p_area_surf = sp.zeros((Np, ), dtype=int)
    p_area_solid = sp.zeros((Np, ), dtype=int)
    t_conns = []
    t_dia_inscribed = []
    t_area = []
    t_perimeter = []
    t_coords = []

    # Start extracting size information for pores and throats
    for i in tqdm(Ps):
        pore = i - 1
        #        if slices[pore] is None:
        #            continue
        s = extend_slice(slices[pore], im.shape)
        sub_im = im[s]
        sub_dt = dt[s]
        pore_im = sub_im == i
        pore_dt = spim.distance_transform_edt(
            sp.pad(pore_im, pad_width=1, mode='constant'))
        s_offset = sp.array([i.start for i in s])
        p_label[pore] = i
        p_coords[pore, :] = spim.center_of_mass(pore_im) + s_offset
        p_volume[pore] = sp.sum(pore_im)
        p_dia_local[pore] = 2 * sp.amax(pore_dt)
        p_dia_global[pore] = 2 * sp.amax(sub_dt)
        p_area_surf[pore] = sp.sum(pore_dt == 1)
        p_area_solid[pore] = sp.sum(sub_dt == 1)
        im_w_throats = spim.binary_dilation(input=pore_im, structure=ball(1))
        im_w_throats = im_w_throats * sub_im
        Pn = sp.unique(im_w_throats)[1:] - 1
        for j in Pn:
            if j > pore:
                t_conns.append([pore, j])
                vx = sp.where(im_w_throats == (j + 1))
                t_dia_inscribed.append(2 * sp.amax(sub_dt[vx]))
                t_perimeter.append(sp.sum(sub_dt[vx] < 2))
                t_area.append(sp.size(vx[0]))
                t_inds = tuple([i + j for i, j in zip(vx, s_offset)])
                temp = sp.where(dt[t_inds] == sp.amax(dt[t_inds]))[0][0]
                if im.ndim == 2:
                    t_coords.append(tuple((t_inds[0][temp], t_inds[1][temp])))
                else:
                    t_coords.append(
                        tuple((t_inds[0][temp], t_inds[1][temp],
                               t_inds[2][temp])))
    # Clean up values
    Nt = len(t_dia_inscribed)  # Get number of throats
    if im.ndim == 2:  # If 2D, add 0's in 3rd dimension
        p_coords = sp.vstack((p_coords.T, sp.zeros((Np, )))).T
        t_coords = sp.vstack((sp.array(t_coords).T, sp.zeros((Nt, )))).T

    # Start creating dictionary of pore network information
    net = {}
    net['pore.all'] = sp.ones((Np, ), dtype=bool)
    net['throat.all'] = sp.ones((Nt, ), dtype=bool)
    net['pore.coords'] = sp.copy(p_coords) * voxel_size
    net['pore.centroid'] = sp.copy(p_coords) * voxel_size
    net['throat.centroid'] = sp.array(t_coords) * voxel_size
    net['throat.conns'] = sp.array(t_conns)
    net['pore.label'] = sp.array(p_label)
    net['pore.volume'] = sp.copy(p_volume) * (voxel_size**3)
    net['throat.volume'] = sp.zeros((Nt, ), dtype=float)
    net['pore.diameter'] = sp.copy(p_dia_local) * voxel_size
    net['pore.inscribed_diameter'] = sp.copy(p_dia_local) * voxel_size
    net['pore.equivalent_diameter'] = 2 * (
        (3 / 4 * net['pore.volume'] / sp.pi)**(1 / 3))
    net['pore.extended_diameter'] = sp.copy(p_dia_global) * voxel_size
    net['pore.surface_area'] = sp.copy(p_area_surf) * (voxel_size)**2
    net['pore.solid_area'] = sp.copy(p_area_solid) * (voxel_size)**2
    net['throat.diameter'] = sp.array(t_dia_inscribed) * voxel_size
    net['throat.inscribed_diameter'] = sp.array(t_dia_inscribed) * voxel_size
    net['throat.area'] = sp.array(t_area) * (voxel_size**2)
    net['throat.perimeter'] = sp.array(t_perimeter) * voxel_size
    net['throat.equivalent_diameter'] = (sp.array(t_area) *
                                         (voxel_size**2))**(0.5)
    P12 = net['throat.conns']
    PT1 = sp.sqrt(
        sp.sum(((p_coords[P12[:, 0]] - t_coords) * voxel_size)**2, axis=1))
    PT2 = sp.sqrt(
        sp.sum(((p_coords[P12[:, 1]] - t_coords) * voxel_size)**2, axis=1))
    net['throat.total_length'] = PT1 + PT2
    PT1 = PT1 - p_dia_local[P12[:, 0]] / 2 * voxel_size
    PT2 = PT2 - p_dia_local[P12[:, 1]] / 2 * voxel_size
    net['throat.length'] = PT1 + PT2
    dist = (p_coords[P12[:, 0]] - p_coords[P12[:, 1]]) * voxel_size
    net['throat.direct_length'] = sp.sqrt(sp.sum(dist**2, axis=1))

    return net
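# A toy usage sketch (assumptions: the helpers this function relies on --
# extend_slice, tqdm, skimage, sp = scipy, spim = scipy.ndimage -- are
# importable; the two labelled blocks stand in for a real pore partition):
import scipy as sp

im = sp.zeros((20, 20), dtype=int)
im[2:9, 2:9] = 1      # pore region 1
im[8:18, 8:18] = 2    # pore region 2, touching region 1
net = extract_pore_network(im)
print(net['throat.conns'])  # expected to contain the pair [0, 1]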
Example #55
        #plot constant
        #if constant != None and subplot:

        if fit:
            # Sometimes the curve_fit will not be able to calculate the variance
            # Instead it returns inf as the var_matrix. In this case, scaling
            # the y-values can help.
            scale = 1.0
            while True:
                try:
                    coeff, var_matrix = curve_fit(func, xy[:, 0],
                                                  scale * xy[:, 1])
                    coeff = coeff / scale
                    variance = sp.diagonal(var_matrix)
                    SE = sp.sqrt(variance)
                    SE = SE / scale
                    results = {}
                    for k in range(len(SE)):
                        results[parameters[k]] = [coeff[k], SE[k]]
                    print()
                    print('{0:>5}  {1:>12}  {2:>12}'.format(
                        'Coeff', 'Value', 'Error'))
                    for v, c in results.items():
                        print('{0:>5}  {1:>12.5f}  {2:>12.5f}'.format(
                            v, c[0], c[1]))
                    break

                except ValueError:
                    scale *= 10
                    if scale == 1000000:
Example #56
# zero-padding and calculate frequency axis
N = zero_padding(T)
freq = fftpack.fftfreq(N,
                       d=para_dict['step'] * 10**(-15))  # frequency axes in Hz
if para_dict['step'] < 0:
    freq = freq[::-1]  # reverse array
wn = freq / 100.0 / const.c + para_dict[
    'wn_M']  # measured wavenumbers in cm^-1
cut_index = np.argmin(wn)
wn = redistribute(wn, cut_index)
# create spectrogram-matrix
S = np.zeros((np.size(T), np.size(wn)))
# calculate start position
data_window[0] /= 1000.0  # back to ps
data_window[1] /= 1000.0  # back to ps
HWFP = sp.sqrt(3.0 / sp.log(
    2)) * FWHM / 2.0  # half width where gaussian has dropped to 5% i.e. 1/e^3
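# (check: a gaussian exp(-4*ln(2)*t**2/FWHM**2) evaluated at t = HWFP, i.e.
#  t = (FWHM/2)*sqrt(3/ln(2)), gives exp(-3) ~ 0.05, the 5% point quoted above)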
slide_pos = data_window[0] - HWFP
n = 0

while slide_pos <= data_window[1] + HWFP:
    Z_slide = slide_window(
        T, Z, slide_pos, FWHM
    )  # multiply gaussian onto data set which is centered at current sliding position
    DFT_slide = redistribute(
        abs(fftpack.fft(Z_slide, n=N)), cut_index
    )  # FFT of the interferogram, truncated by multiplying with the gaussian
    n += 1
    if para_dict['step'] < 0:
        DFT_slide = DFT_slide[::-1]  # reverse array
    # add DFT to spectrum-matrix while weighting with temporal gaussian envelope
    for i in range(np.size(T)):
Example #57
 def sqrt(self):
     return simple_diag_matrix(sp.sqrt(self.diag))
Example #58
Sy = 490.0  # MPa
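# Assumption: Sut is used below but never defined in this excerpt; a
# hypothetical value consistent with the 590 in the surface-factor line is
# supplied so the snippet runs.
Sut = 590.0  # MPa (assumed)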

F_max = 15000.0  # Newtons
F_min = 5000.0  # Newtons

w = 30.0  # mm
h = 10.0  # mm

n = 1.2  # Safety Factor

#%%
# Loading Factors:
C_load = 0.7

A_95 = 0.05 * w * h  # mm^2
d_rect_area = sp.sqrt(A_95 / 0.076)  # mm
C_size = 1.189 * d_rect_area**(-0.097)

C_surface = 1.58 * (590**(-0.085))

C_temp = 1.0

C_reliab = 0.814

Se_prime = 0.5 * Sut  # MPa


def get_Se(Se_prime, C_load, C_size, C_surface, C_reliab, C_temp):
    ''' Returns the value of Se '''
    return Se_prime * C_load * C_size * C_surface * C_temp * C_reliab
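
# Applying the correction factors computed above:
Se = get_Se(Se_prime, C_load, C_size, C_surface, C_reliab, C_temp)
print(Se)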
Example #59
def dij_particle(p1, p2):
    return sp.sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2 +
                   (p1[2] - p2[2])**2)
Example #60
def van_rossum_dist(trains, tau=1.0 * pq.s, sort=True):
    """
    Calculates the van Rossum distance.

    It is defined as Euclidean distance of the spike trains convolved with a
    causal decaying exponential smoothing filter. A detailed description can
    be found in *Rossum, M. C. W. (2001). A novel spike distance. Neural
    Computation, 13(4), 751-763.* This implementation is normalized to yield
    a distance of 1.0 for the distance between an empty spike train and a
    spike train with a single spike. Divide the result by sqrt(2.0) to get
    the normalization used in the cited paper.

    Given :math:`N` spike trains with :math:`n` spikes on average the run-time
    complexity of this function is :math:`O(N^2 n)`.

    Parameters
    ----------
    trains : Sequence of :class:`neo.core.SpikeTrain` objects of
        which the van Rossum distance will be calculated pairwise.
    tau : Quantity scalar
        Decay rate of the exponential function as time scalar. Controls for
        which time scale the metric will be sensitive. This parameter will
        be ignored if `kernel` is not `None`. May also be :const:`scipy.inf`
        which will lead to only measuring differences in spike count.
        Default: 1.0 * pq.s
    sort : bool
        Spike trains with sorted spike times might be needed for the
        calculation. You can set `sort` to `False` if you know that your
        spike trains are already sorted to decrease calculation time.
        Default: True

    Returns
    -------
        2-D array
        Matrix containing the van Rossum distances for all pairs of
        spike trains.

    Example
    -------
        import elephant.spike_train_dissimilarity_measures as stdm
        tau = 10.0 * pq.ms
        st_a = SpikeTrain([10, 20, 30], units='ms', t_stop= 1000.0)
        st_b = SpikeTrain([12, 24, 30], units='ms', t_stop= 1000.0)
        vr   = stdm.van_rossum_dist([st_a, st_b], tau)[0, 1]
    """
    for train in trains:
        if not (isinstance(train, (pq.quantity.Quantity, SpikeTrain))
                and train.dimensionality.simplified == pq.Quantity(
                    1, "s").dimensionality.simplified):
            raise TypeError("Spike trains must have a time unit.")

    if not (isinstance(tau, pq.quantity.Quantity)
            and tau.dimensionality.simplified == pq.Quantity(
                1, "s").dimensionality.simplified):
        raise TypeError("tau must be a time quantity.")

    if tau == 0:
        spike_counts = [st.size for st in trains]
        return np.sqrt(spike_counts + np.atleast_2d(spike_counts).T)
    elif tau == np.inf:
        spike_counts = [st.size for st in trains]
        return np.absolute(spike_counts - np.atleast_2d(spike_counts).T)

    k_dist = _summed_dist_matrix([st.view(type=pq.Quantity) for st in trains],
                                 tau, not sort)
    vr_dist = np.empty_like(k_dist)
    for i, j in np.ndindex(k_dist.shape):
        vr_dist[i, j] = (k_dist[i, i] + k_dist[j, j] - k_dist[i, j] -
                         k_dist[j, i])
    return sp.sqrt(vr_dist)