Example #1
	def _initParams_fast(self):
		""" 
		initialize the gp parameters
			1) project Y on the known factor X0 -> Y0
				average variance of Y0 is used to initialize the variance explained by X0
			2) considers the residual Y1 = Y-Y0 (this is equivalent to regressing X0 out)
			3) perform PCA on cov(Y1) and considers the first k PC for initializing X
			4) the variance of all other PCs is used to initialize the noise
			5) the variance explained by interaction is set to a small random number 
		"""
		Xd = LA.pinv(self.X0)
		Y0 = self.X0.dot(Xd.dot(self.Y))
		Y1 = self.Y-Y0
		YY = SP.cov(Y1)
		S,U = LA.eigh(YY)
		X = U[:,-self.k:]*SP.sqrt(S[-self.k:])
		a = SP.array([SP.sqrt(Y0.var(0).mean())])
		b = 1e-3*SP.randn(1)
		c = SP.array([SP.sqrt((YY-SP.dot(X,X.T)).diagonal().mean())])
		# gp hyper params
		params = limix.CGPHyperParams()
		if self.interaction:
			params['covar'] = SP.concatenate([a,X.reshape(self.N*self.k,order='F'),SP.ones(1),b])
		else:
			params['covar'] = SP.concatenate([a,X.reshape(self.N*self.k,order='F')])
		params['lik'] = c
		return params
Example #2
def ideal_data(num, dimU, dimY, dimX, noise=1):
    """Linear system data"""
    # generate randomized linear system matrices
    A = randn(dimX, dimX)
    B = randn(dimX, dimU)
    C = randn(dimY, dimX)
    D = randn(dimY, dimU)

    # make sure state evolution is stable
    U, S, V = svd(A)
    A = dot(U, dot(diag(S / max(S)), V))
    U, S, V = svd(B)
    S2 = zeros((size(U,1), size(V,0)))
    S2[:len(S), :len(S)] = diag(S / max(S))  # embed the scaled singular values in a dimX x dimU block
    B = dot(U, dot(S2, V))

    # random input
    U = randn(num, dimU)

    # initial state
    X = reshape(randn(dimX), (1,-1))

    # initial output
    Y = reshape(dot(C, X[-1]) + dot(D, U[0]), (1,-1))

    # generate next state
    X = concatenate((X, reshape(dot(A, X[-1]) + dot(B, U[0]), (1,-1))))

    # and so forth
    for u in U[1:]:
        Y = concatenate((Y, reshape(dot(C, X[-1]) + dot(D, u), (1,-1))))
        X = concatenate((X, reshape(dot(A, X[-1]) + dot(B, u), (1,-1))))

    return U, Y + randn(num, dimY) * noise
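A minimal call sketch for the generator above; the flat names it relies on are assumed to resolve like this in its module:

from numpy import dot, diag, zeros, size, reshape, concatenate
from numpy.linalg import svd
from numpy.random import randn   # assumed flat imports used by ideal_data

U, Y = ideal_data(num=200, dimU=2, dimY=3, dimX=4, noise=0.1)
# U: (200, 2) inputs; Y: (200, 3) noisy outputs of a stable random linear system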
Example #3
def dwt_2d(image, poly, l=1):
    """
    Computes the discrete wavelet transform for a 2D input image
    :param image: input image to be processed
    :param poly: polyphase filter matrix containing the lowpass and highpass coefficients
    :param l: number of decomposition levels to apply
    :return: the transformed image
    """
    assert max(mod(image.shape, 2**l)) == 0, 'image dimension ({}) does not allow for a {}-level decomposition'.format(image.shape, l)

    image_ = image.copy()
    for level in range(l):
        sub_image = image_[:(image.shape[0]//(2**level)), :(image.shape[1]//(2**level))]  # integer division for valid slice bounds

        for row in range(sub_image.shape[0]):
            s = sub_image[row, :]
            a, d = dwt(s, poly)

            sub_image[row, :] = concatenate((a[newaxis, :], d[0][newaxis, :]), axis=1)

        for col in range(sub_image.shape[1]):
            s = sub_image[:, col]
            a, d = dwt(s, poly)

            sub_image[:, col] = concatenate((a, d[0]), axis=0)

    return image_
Example #4
def idwt(a, d, poly, l=1):
    """
    Computes the inverse discrete wavelet transform for a 1D signal
    :param a: the approximation coefficients at the deepest level
    :param d: a list of detail coefficients for each level
    :param poly: polyphase filter matrix containing the lowpass and highpass coefficients
    :param l: number of transform levels to invert
    :return: the transformed signal
    """
    assert len(d) == l, 'insufficient detail coefficients provided for reconstruction depth {}'.format(l)

    if len(a.shape) == 1:
        a = a[newaxis, :]

    for level in reversed(range(l)):
        decomposition = concatenate((a, d[level][newaxis, :]), axis=0)

        reconstruction = zeros_like(decomposition, dtype=float)
        for z in range(poly.shape[1] // 2):  # one term per pair of polyphase columns
            reconstruction += dot(poly[:, 2*z:2*z+2].transpose(), concatenate(
                (decomposition[:, decomposition.shape[1]-z:], decomposition[:, :decomposition.shape[1]-z]), axis=1))

        a = reconstruction.transpose().reshape(1, 2*a.shape[1])

    return a
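The loops above imply that `poly` is a 2x2k array whose rows hold the lowpass and highpass polyphase components. As a hedged illustration (my assumption, not part of the source project), the Haar filter pair would look like:

from numpy import array, sqrt   # stand-in for the module's flat scipy imports

poly_haar = array([[1.0,  1.0],
                   [1.0, -1.0]]) / sqrt(2.0)  # row 0: lowpass, row 1: highpass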
Example #5
def mlr(x,y,order):
    """Multiple linear regression fit of the columns of matrix x 
    (dependent variables) to constituent vector y (independent variables)
    
    order -     order of a smoothing polynomial, which can be included 
                in the set of independent variables. If order is
                not specified, no background will be included.
    b -         fit coeffs
    f -         fit result (m x 1 column vector)
    r -         residual   (m x 1 column vector)
    """
    
    if order > 0:
        s=scipy.ones((len(y),1))
        for j in range(order):
            s=scipy.concatenate((s,(scipy.arange(0,1+(1.0/(len(y)-1)),1.0/(len(y)-1))**j)[:,nA]),1)
        X=scipy.concatenate((x, s),1)
    else:
        X = x
    
    #calc fit b=fit coefficients
    b = scipy.dot(scipy.dot(scipy.linalg.pinv(scipy.dot(scipy.transpose(X),X)),scipy.transpose(X)),y)
    f = scipy.dot(X,b)
    r = y - f

    return b,f,r
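A short usage sketch, with order=0 so the smoothing-polynomial branch (which needs the module's `nA = scipy.newaxis` alias) is skipped; it assumes the scipy-era namespace the snippet itself uses:

import numpy, scipy, scipy.linalg   # mlr calls scipy.dot / scipy.linalg.pinv internally
x = numpy.random.randn(50, 3)                          # 50 samples, 3 regressors
y = numpy.dot(x, numpy.array([[1.0], [2.0], [-0.5]]))  # m x 1 response
b, f, r = mlr(x, y, 0)   # b: coefficients, f: fitted values, r: residuals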
Example #6
 def _getScalesDiag(self,termx=0):
     """
     Internal function for parameter initialization
     Uses 2 term single trait model to get covar params for initialization
     
     Args:
          termx:      non-noise term that is used for initialization 
     """
     assert self.P>1, 'VarianceDecomposition:: diagonal init_method allowed only for multi trait models' 
      assert self.noisPos is not None, 'VarianceDecomposition:: noise term has to be set'
      assert termx<self.n_randEffs-1, 'VarianceDecomposition:: termx>=n_randEffs-1'
      assert self.trait_covar_type[self.noisPos] not in ['lowrank','block','fixed'], 'VarianceDecomposition:: diagonal initialization not possible for such a parametrization'
      assert self.trait_covar_type[termx] not in ['lowrank','block','fixed'], 'VarianceDecomposition:: diagonal initialization not possible for such a parametrization'
     scales = []
     res = self._getH2singleTrait(self.vd.getTerm(termx).getK())
     scaleg = sp.sqrt(res['varg'].mean())
     scalen = sp.sqrt(res['varn'].mean())
     for term_i in range(self.n_randEffs):
         if term_i==termx:
             _scales = scaleg*self.diag[term_i]
         elif term_i==self.noisPos:
             _scales = scalen*self.diag[term_i]
         else:
             _scales = 0.*self.diag[term_i]
         if self.jitter[term_i]>0:
             _scales = sp.concatenate((_scales,sp.array([sp.sqrt(self.jitter[term_i])])))
         scales.append(_scales)
     return sp.concatenate(scales)
Example #7
File: ROC.py Project: cyversewarwick/gp2s
def roc(labels, predictions):
    """roc - calculate receiver operator curve
    labels: true labels (>0 : True, else False)
    predictions: the ranking generated from whatever predictor is used"""
    #1. convert to arrays
    labels = S.array(labels).reshape([-1])
    predictions = S.array(predictions).reshape([-1])

    #threshold
    t = labels>0
    
    #sort predictions in descending order
    #get order implied by predictor (descending)
    Ix = S.argsort(predictions)[::-1]
    #reorder truth
    t = t[Ix]

    #compute true positive and false positive rates
    tp = S.double(N.cumsum(t))/t.sum()
    fp = S.double(N.cumsum(~t))/(~t).sum()

    #add end points
    tp = S.concatenate(([0],tp,[1]))
    fp = S.concatenate(([0],fp,[1]))

    return [tp,fp]
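A quick call sketch (in the source module, `S` and `N` are evidently the scipy and numpy aliases):

labels      = [1, 1, 0, 1, 0, 0]             # 3 positives, 3 negatives
predictions = [0.9, 0.8, 0.3, 0.7, 0.6, 0.1]
tp, fp = roc(labels, predictions)            # both curves start at 0 and end at 1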
Example #8
def main():
    points = generate_gaussian(1000, 2, 0, 2, center=(10, 0))
    pylab.plot (points[:,0], points[:,1], 'r+')
    #export("Classe A", points)
    points2 = generate_gaussian(1000, 2, 0, 2, center=(5, 5))
    pylab.plot (points2[:,0], points2[:,1], 'b+')
    #export("Classe C", points)
    points3 = generate_gaussian(1000, 2, 0, 2, center=(0, 10))
    pylab.plot (points3[:,0], points3[:,1], 'y+')
    points4 = generate_gaussian(1000, 2, 0, 2, center=(0, 0))
    pylab.plot (points4[:,0], points4[:,1], 'g+')
    pylab.axis([-10, 20, -10, 20])
    pylab.show()

    labels = []
    for i in xrange(len(points)):
        labels.append(0)
    for i in xrange(len(points2)):
        labels.append(1)
    for i in xrange(len(points3)):
        labels.append(2)
    for i in xrange(len(points4)):
        labels.append(3)

    points = scipy.concatenate ((points, points2))
    points = scipy.concatenate ((points, points3))
    points = scipy.concatenate ((points, points4))

    data = dataset.Dataset (points, labels)
    data.random ()

    dataset.save (data, "../datasets/4gaussians1k.data")
Example #9
def ar_model_check_stable(A):
    """check if this AR model is stable

    :Parameters:
        A : ndarray
            The coefficient matrix of the model
    """

    # inits and checks
    m, p = A.shape
    if p % m != 0:
        raise ValueError('bad inputs!')
    p //= m  # model order: number of m x m coefficient blocks

    # check for stable model
    A1 = N.concatenate((
        A,
        N.concatenate((
            N.eye((p - 1) * m),
            N.zeros(((p - 1) * m, m))
        ), axis=1)
    ))
    lambdas = NL.eigvals(A1)
    rval = True
    if (N.absolute(lambdas) > 1).any():
        rval = False
    del A1, lambdas
    return rval
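For instance (a minimal sketch, assuming `N`/`NL` are the numpy and numpy.linalg aliases the snippet suggests):

import numpy as N
import numpy.linalg as NL
A = 0.5 * N.eye(2)               # AR(1) with m=2, p=1: x_t = 0.5 x_{t-1}
print(ar_model_check_stable(A))  # True: all eigenvalues inside the unit circle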
Example #10
 def _update_6(self):
     # construct system
     Ax = scipy.zeros((len(self.data), 6))
     Ax[:, 0] = 1.0
     Ax[:, 2] = self.data[:, 0] - self.center[0]
     Ax[:, 3] = self.data[:, 1] - self.center[1]
     Ay = scipy.zeros((len(self.data), 6))
     Ay[:, 1] = 1.0
     Ay[:, 4] = self.data[:, 0] - self.center[0]
     Ay[:, 5] = self.data[:, 1] - self.center[1]  # centered, like the other columns
     A = scipy.concatenate((Ax, Ay), axis = 0)
     del Ax, Ay
     b = scipy.concatenate((self.data[:, 2], self.data[:, 3]))
     # solve for parameters
     parameters, residual, rank, sigma = scipy.linalg.lstsq(A, b)
     self.tx = parameters[0]
     self.ty = parameters[1]
     self.exx = parameters[2]
     self.exy = parameters[3]
     self.eyx = parameters[4]
     self.eyy = parameters[5]
     del parameters
     # compute residuals
     self.residuals[:, 2] = self.data[:, 2] - self.tx - self.exx * (self.data[:, 0] - self.center[0]) - self.exy * (self.data[:, 1] - self.center[1])
     self.residuals[:, 3] = self.data[:, 3] - self.ty - self.eyx * (self.data[:, 0] - self.center[0]) - self.eyy * (self.data[:, 1] - self.center[1])
Example #11
def shift_row(row, shift):
    if shift == 0:
        return row
    if shift > 0:
        return sp.concatenate(([0] * shift, row[:-shift]))
    else:
        return sp.concatenate((row[-shift:], [0] * -shift))
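For example (numpy stands in here for the snippet's old scipy alias `sp`):

import numpy as sp
row = sp.array([1, 2, 3, 4, 5])
shift_row(row, 2)    # -> [0, 0, 1, 2, 3]  (shift right, zero-pad on the left)
shift_row(row, -2)   # -> [3, 4, 5, 0, 0]  (shift left, zero-pad on the right)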
Example #12
File: ledsim.py Project: puluoning/ledsim
 def __init__(self,layers,gridOpts):
   ''' Initialize the grid using the given layers and grid options.
   '''
   segments = []
   qStart   =  scipy.inf
   qEnd     = -scipy.inf
   for layer in layers:
     if layer.isQuantum:
       d1 = dn = gridOpts.dzQuantum
       segments += [self.get_dz_segment(d1,dn,layer.thickness)]
       qStart = min(qStart,sum([len(seg) for seg in segments[:-1]]))
       qEnd   = max(qEnd,  sum([len(seg) for seg in segments]))
     elif gridOpts.useFixedGrid:
       d1 = dn = gridOpts.dz
       segments += [self.get_dz_segment(d1,dn,layer.thickness)]
     elif layer.thickness*gridOpts.dzCenterFraction > gridOpts.dzEdge:
       d1 = dn = gridOpts.dzEdge
       dc = gridOpts.dzCenterFraction*layer.thickness
       segments += [self.get_dz_segment(d1,dc,layer.thickness/2),
                    self.get_dz_segment(dc,dn,layer.thickness/2)]
     else:
       d1 = dn = gridOpts.dzEdge
       segments += [self.get_dz_segment(d1,dn,layer.thickness)]
   self.dz       = scipy.concatenate(segments)
   self.z        = scipy.concatenate(([0],scipy.cumsum(self.dz)))
   self.zr       = (self.z[:-1]+self.z[1:])/2
   self.znum     = len(self.z)
   self.rnum     = len(self.zr)
   self.gridOpts = gridOpts
   self.qIndex   = scipy.arange(qStart,qEnd+1)   # Wavefunction index
   self.qrIndex  = scipy.arange(qStart,qEnd)     # Quantum region index   
Example #13
File: parlib.py Project: pokornyv/SPEpy
def KramersKronigFFT(ImX_A):
	'''	Hilbert transform used to calculate real part of a function from its imaginary part
	uses piecewise cubic interpolated integral kernel of the Hilbert transform
	use only if len(ImX_A)=2**m-1, uses fft from scipy.fftpack  '''
	X_A = sp.copy(ImX_A)
	N = int(len(X_A))
	## be careful with the data type, otherwise it fails for large N
	if N > 3e6: A = sp.arange(3,N+1,dtype='float64')
	else:       A = sp.arange(3,N+1)  
	X1 = 4.0*sp.log(1.5)
	X2 = 10.0*sp.log(4.0/3.0)-6.0*sp.log(1.5)
	## filling the kernel
	if N > 3e6: Kernel_A = sp.zeros(N-2,dtype='float64')
	else:       Kernel_A = sp.zeros(N-2)
	Kernel_A = (1-A**2)*((A-2)*sp.arctanh(1.0/(1-2*A))+(A+2)*sp.arctanh(1.0/(1+2*A)))\
	+((A**3-6*A**2+11*A-6)*sp.arctanh(1.0/(3-2*A))+(A+3)*(A**2+3*A+2)*sp.arctanh(1.0/(2*A+3)))/3.0
	Kernel_A = sp.concatenate([-sp.flipud(Kernel_A),sp.array([-X2,-X1,0.0,X1,X2]),Kernel_A])/sp.pi
	## zero-padding the functions for fft
	ImXExt_A = sp.concatenate([X_A[int((N-1)/2):],sp.zeros(N+2),X_A[:int((N-1)/2)]])
	KernelExt_A = sp.concatenate([Kernel_A[N:],sp.zeros(1),Kernel_A[:N]])
	## performing the fft
	ftReXExt_A = -fft(ImXExt_A)*fft(KernelExt_A)
	ReXExt_A = sp.real(ifft(ftReXExt_A))
	ReX_A = sp.concatenate([ReXExt_A[int((3*N+3)/2+1):],ReXExt_A[:int((N-1)/2+1)]])
	return ReX_A
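A sanity-check sketch: for X(w) = 1/(w + i), Im X = -1/(w^2 + 1) and the recovered real part should approximate w/(w^2 + 1). This assumes `sp`, `fft` and `ifft` are bound in parlib.py as the docstring says (scipy and scipy.fftpack):

import numpy as np
from scipy.fftpack import fft, ifft
M = 2**12 - 1                   # length must be 2**m - 1 per the docstring
w = np.linspace(-20.0, 20.0, M)
ImX_A = -1.0 / (w**2 + 1.0)     # imaginary part of 1/(w + 1j)
ReX_A = KramersKronigFFT(ImX_A) # should approximate w / (w**2 + 1)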
Example #14
File: FEM.py Project: mrinaliyer/tuckerDFT
    def generateNodesAdaptive(self):
        innerDomainSize = self.innerDomainSize
        innerMeshSize   = self.innerMeshSize
        numberElementsInnerDomain = innerDomainSize/innerMeshSize
        assert(numberElementsInnerDomain < self.numberElements)
        domainCenter = (self.domainStart+self.domainEnd)/2
        nodes0 = np.linspace(domainCenter,innerDomainSize/2.0,(numberElementsInnerDomain/2.0)+1.0)
        nodes0 = np.delete(nodes0,-1)
        numberOuterIntervalsFromDomainCenter = (self.numberElements - numberElementsInnerDomain)/2.0
        const = np.log2(innerDomainSize/2.0)/0.5
        exp = np.linspace(const,np.log2(self.domainEnd*self.domainEnd),numberOuterIntervalsFromDomainCenter+1)
        nodes1 = np.power(np.sqrt(2),exp)
        nodesp = np.concatenate((nodes0,nodes1))
        nodesn = -nodesp[::-1]
        nodesn = np.delete(nodesn,-1)
        linNodalCoordinates = np.concatenate((nodesn,nodesp))
        nodalCoordinates = 0

        #Introduce higher order nodes
        if self.elementType == "quadratic" or self.elementType == "cubic":
           if self.elementType == "quadratic":
              numberNodesPerElement = 3 
           elif self.elementType == "cubic":
              numberNodesPerElement = 4

           for i in range(0,len(linNodalCoordinates)-1):
              newnodes = np.linspace(linNodalCoordinates[i],linNodalCoordinates[i+1],numberNodesPerElement)
              nodalCoordinates = np.delete(nodalCoordinates,-1)
              nodalCoordinates = np.concatenate((nodalCoordinates,newnodes))

        else:
           nodalCoordinates = linNodalCoordinates
    
        return nodalCoordinates
Example #15
File: core.py Project: pennajm/gptools
 def __call__(self, Xi, Xj, ni, nj, hyper_deriv=None, symmetric=False):
     """Evaluate the covariance between points `Xi` and `Xj` with derivative order `ni`, `nj`.
     
     Parameters
     ----------
     Xi : :py:class:`Matrix` or other Array-like, (`M`, `N`)
         `M` inputs with dimension `N`.
     Xj : :py:class:`Matrix` or other Array-like, (`M`, `N`)
         `M` inputs with dimension `N`.
     ni : :py:class:`Matrix` or other Array-like, (`M`, `N`)
         `M` derivative orders for set `i`.
     nj : :py:class:`Matrix` or other Array-like, (`M`, `N`)
         `M` derivative orders for set `j`.
     hyper_deriv : Non-negative int or None, optional
         The index of the hyperparameter to compute the first derivative
         with respect to. If None, no derivatives are taken. Hyperparameter
         derivatives are not supported at this point. Default is None.
     symmetric : bool, optional
         Whether or not the input `Xi`, `Xj` are from a symmetric matrix.
         Default is False.
     
     Returns
     -------
     Kij : :py:class:`Array`, (`M`,)
         Covariances for each of the `M` `Xi`, `Xj` pairs.
     
     Raises
     ------
     NotImplementedError
         If the `hyper_deriv` keyword is not None.
     """
     if hyper_deriv is not None:
         raise NotImplementedError("Hyperparameter derivatives have not been implemented!")
     n_cat = scipy.asarray(scipy.concatenate((ni, nj), axis=1), dtype=int)
     X_cat = scipy.asarray(scipy.concatenate((Xi, Xj), axis=1), dtype=float)
     n_cat_unique = unique_rows(n_cat)
     k = scipy.zeros(Xi.shape[0], dtype=float)
     # Loop over unique derivative patterns:
     if self.num_proc > 1:
         pool = multiprocessing.Pool(processes=self.num_proc)
     for n_cat_state in n_cat_unique:
         idxs = scipy.where(scipy.asarray((n_cat == n_cat_state).all(axis=1)).squeeze())[0]
         if (n_cat_state == 0).all():
             k[idxs] = self.cov_func(Xi[idxs, :], Xj[idxs, :], *self.params)
         else:
             if self.num_proc > 1 and len(idxs) > 1:
                 k[idxs] = scipy.asarray(
                     pool.map(_ArbitraryKernelEval(self, n_cat_state), X_cat[idxs, :]),
                     dtype=float
                 )
             else:
                 for idx in idxs:
                     k[idx] = mpmath.chop(mpmath.diff(self._mask_cov_func,
                                                      X_cat[idx, :],
                                                      n=n_cat_state,
                                                      singular=True))
     
     if self.num_proc > 0:
         pool.close()
     return k
Example #16
File: lmm_fast.py Project: PMBio/limix
def run_interact(Y, intA, intB, covs, K):
    """ Calculate pvalues for the nested model of including a multiplicative term between intA and intB into the additive model """
    [N, Ny] = Y.shape

    Na = intA.shape[1] # number of interaction terms 1
    Nb = intB.shape[1] # number of interaction terms 2
    
    # add mean column if no covariates are given (must happen before rotation by U):
    if covs is None: covs = SP.ones([N,1])

    S,U = LA.eigh(K)
    UY = SP.dot(U.T,Y)
    UintA = SP.dot(U.T,intA)
    UintB = SP.dot(U.T,intB)
    Ucovs = SP.dot(U.T,covs)
    # for each snp/gene/factor combination, run a lod
    # snps need to be diced bc of missing values - iterate over them, else in arrays
    lods = SP.zeros([Na, Nb, Ny])

    # for each pair of interacting terms
    for a in range(Na):
        for b in range(Nb):
            # calculate additive and interaction terms
            C = SP.concatenate((Ucovs, UintA[:,a:a+1], UintB[:,b:b+1]), axis=1)  # stack covariates as columns
            X = intA[:,a:a+1]*intB[:,b:b+1]
            UX = SP.dot(U.T,X);
            UX = SP.concatenate((UX, C), axis=1)
            for phen in SP.arange(Ny):
                UY_=UY[:,phen];
                nllnull,ldeltanull=optdelta(UY_,C,S,ldeltanull=None,numintervals=10,ldeltamin=-5.0,ldeltamax=5.0);
                nllalt,ldeltaalt=optdelta(UY_,UX,S,ldeltanull=ldeltanull,numintervals=100,ldeltamin=-5.0,ldeltamax=5.0);
                lods[a,b,phen] = nllnull-nllalt  # null minus alternative negative log-likelihood
    return lods
Example #17
def invert_epochs(epochs, end=None):
    """inverts epochs inverted

    The first epoch will be mapped to [0, start] and the last will be mapped
    to [end of last epoch, :end:]. Epochs that accidentally become negative
    or zero-length will be omitted.

    :type epochs: ndarray
    :param epochs: epoch set to invert
    :type end: int
    :param end: If not None, it is taken as the end of the last epoch,
        else max(index-dtype) is taken instead.
        Default=None
    :returns: ndarray - inverted epoch set
    """

    # checks
    if end is None:
        end = sp.iinfo(INDEX_DTYPE).max
    else:
        end = INDEX_DTYPE.type(end)

    # flip them
    rval = sp.vstack((sp.concatenate(([0], epochs[:, 1])), sp.concatenate((epochs[:, 0], [end])))).T
    return (rval[rval[:, 1] - rval[:, 0] > 0]).astype(INDEX_DTYPE)
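For example (INDEX_DTYPE is a module-level constant in the original package; `int64` is a stand-in assumption here):

import numpy as sp               # stand-in for the snippet's scipy alias
INDEX_DTYPE = sp.dtype('int64')  # assumed module constant
epochs = sp.array([[10, 20], [30, 40]])
invert_epochs(epochs, end=50)
# -> [[ 0 10]
#     [20 30]
#     [40 50]]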
Example #18
def philm(l,m,x,y,w0):
    
    normalize_factor = scipy.sqrt(2/2**(l+m)/scipy.misc.factorial(l)/scipy.misc.factorial(m)/pi)/w0
    hermite_x = numpy.polynomial.hermite.hermval(scipy.sqrt(2)*x/w0,scipy.concatenate((scipy.zeros(l),scipy.ones(1))))
    hermite_y = numpy.polynomial.hermite.hermval(scipy.sqrt(2)*y/w0,scipy.concatenate((scipy.zeros(m),scipy.ones(1))))
    phi_lm = normalize_factor*hermite_x*hermite_y*scipy.exp(-(x**2+y**2)/w0**2)
    return phi_lm
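A brief evaluation sketch for the Hermite-Gauss mode above. It assumes the snippet's flat imports (`scipy`, `numpy`, `pi`) and an older scipy where `scipy.misc.factorial` and the numpy re-exports (scipy.sqrt, scipy.exp, ...) still exist:

import numpy, scipy, scipy.misc
from numpy import pi
x = numpy.linspace(-3.0, 3.0, 200)
phi = philm(0, 0, x, 0.0, 1.0)   # fundamental (l=m=0) mode along the x axis, w0=1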
Example #19
 def run(self, T, dT=None, nT=100, times=None):
     """Run the Repressilator for the specified amount of time T, returning
     output either for fixed time step dT, or over a specified number
     of timesteps nT, or for a specified array of times.  Store the
     trajectory returned by odeint in the instance variable self.traj,
     concatenating the result to the existing self.traj if a previous
     trajectory had been created."""
     if times is None:
         if dT is None:
             #times = scipy.linspace(self.t, self.t+T, nT)
             times = scipy.arange(0., 50., 0.2)
         else:
             times = scipy.arange(self.t, self.t+T, dT)
     traj = scipy.integrate.odeint(self.dydt, self.y, times, \
                                   args=(self.alpha, self.n, self.alpha0,
                                         self.beta), mxstep=1000)
     if self.traj is None:
         self.traj = traj
         self.y = self.traj[-1]
         self.times = times
         self.t = self.times[-1]
     else:
         self.traj = scipy.concatenate((self.traj, traj))
         self.y = self.traj[-1]
         self.times = scipy.concatenate((self.times, times))
         self.t = self.times[-1]
      return traj[:, :3]  # assume only mRNA concentrations are known
Example #20
        def sampleIndicator(Z,I,take_out=True):
            """sample all indicators that are true in the index vector I
            take_out: take indices out of the dataset first (yes)
            """

            #create indicator vectors for joint & single GP as well as classifier
            
            PZ = S.zeros([2,I.sum()])
            IS = Z
            IJ = ~Z
            IZ = S.ones(Z.shape[0],dtype='bool')
            if take_out:
                #take out the Ith observation from each GP
                IS = IS & (~I)
                IJ = IJ & (~I)
                IZ = IZ & (~I)

            #update datasets
            self.gpr_0.setData(S.concatenate((M0R[0][:,IS]),axis=0).reshape([-1,1]),S.concatenate((M0R[1][:,IS]),axis=1),process=False)           
            self.gpr_1.setData(S.concatenate((M1R[0][:,IS]),axis=0).reshape([-1,1]),S.concatenate((M1R[1][:,IS]),axis=1),process=False)
            self.gpr_join.setData(S.concatenate((MJR[0][:,IJ]),axis=0).reshape([-1,1]),S.concatenate((MJR[1][:,IJ]),axis=1),process=False)
            self.gpZ.setData(XTR[IZ],Z[IZ],process=False)
            

            #GP predictions
            Yp0 = self.gpr_0.predict(self.gpr_0.logtheta,XT[I],mean=False)
            Yp1 = self.gpr_1.predict(self.gpr_1.logtheta,XT[I],mean=False)
            Ypj = self.gpr_join.predict(self.gpr_0.logtheta,XT[I],mean=False)
            #predict binary variable
            Zp  = self.gpZ.predict(self.gpZ.logtheta,XT[I])[0]
                      
            #robust likelihood
            c =0.9
            D0  = c    * normpdf(M0R[1][:,I],Yp0[0],Yp0[1])
            D0 += (1-c)* normpdf(M0R[1][:,I],Yp0[0],1E8)
            D0  = S.log(D0)
            
            D1  = c    * normpdf(M1R[1][:,I],Yp1[0],Yp1[1])
            D1 += (1-c)* normpdf(M1R[1][:,I],Yp1[0],1E8)
            D1  = S.log(D1)
            
            DJ  = c    * normpdf(MJR[1][:,I],Ypj[0],Ypj[1])
            DJ += (1-c)* normpdf(MJR[1][:,I],Ypj[0],1E8)
            DJ = S.log(DJ)
            #sum over logs
            DS  = D0.sum(axis=0) + D1.sum(axis=0)
            DJ  = DJ.sum(axis=0)
            #calc posterior 
            PZ[0,:] = (1-Zp)*S.exp(DJ)*self.prior_Z[0]
            PZ[1,:] = Zp    *S.exp(DS)*self.prior_Z[1]
            PZ      /= PZ.sum(axis=0)
            Z_       = S.rand(I.sum())<=PZ[1,:]        
            #sample indicators
            if(IS.sum()==1):
                Z_ = True
            if(IJ.sum()==1):
                Z_ = False
            return [Z_,PZ]
Example #21
    def __init__(self, U, Y, statedim, reg=None):
        if size(shape(U)) == 1:
            U = reshape(U, (-1,1))
        if size(shape(Y)) == 1:
            Y = reshape(Y, (-1,1))
        if reg is None:
            reg = 0

        yDim = size(Y,1)
        uDim = size(U,1)

        self.output_size = size(Y,1) # placeholder

        # number of samples of past/future we'll mash together into a 'state'
        width = 1
        # total number of past/future pairings we get as a result
        K = size(U,0) - 2 * width + 1

        # build hankel matrices containing pasts and futures
        U_p = array([ravel(U[t : t + width]) for t in range(K)]).T
        U_f = array([ravel(U[t + width : t + 2 * width]) for t in range(K)]).T
        Y_p = array([ravel(Y[t : t + width]) for t in range(K)]).T
        Y_f = array([ravel(Y[t + width : t + 2 * width]) for t in range(K)]).T

        # solve the eigenvalue problem
        YfUfT = dot(Y_f, U_f.T)
        YfUpT = dot(Y_f, U_p.T)
        YfYpT = dot(Y_f, Y_p.T)
        UfUpT = dot(U_f, U_p.T)
        UfYpT = dot(U_f, Y_p.T)
        UpYpT = dot(U_p, Y_p.T)
        F = bmat([[None, YfUfT, YfUpT, YfYpT],
                  [YfUfT.T, None, UfUpT, UfYpT],
                  [YfUpT.T, UfUpT.T, None, UpYpT],
                  [YfYpT.T, UfYpT.T, UpYpT.T, None]])
        Ginv = bmat([[pinv(dot(Y_f,Y_f.T)), None, None, None],
                     [None, pinv(dot(U_f,U_f.T)), None, None],
                     [None, None, pinv(dot(U_p,U_p.T)), None],
                     [None, None, None, pinv(dot(Y_p,Y_p.T))]])
        F = F - eye(size(F, 0)) * reg

        # Take smallest eigenvalues
        _, W = eigs(Ginv.dot(F), k=statedim, which='SR')

        # State sequence is a weighted combination of the past
        W_U_p = W[ width * (yDim + uDim) : width * (yDim + uDim + uDim), :]
        W_Y_p = W[ width * (yDim + uDim + uDim):, :]
        X_hist = dot(W_U_p.T, U_p) + dot(W_Y_p.T, Y_p)

        # Regress; trim inputs to match the states we retrieved
        R = concatenate((X_hist[:, :-1], U[width:-width].T), 0)
        L = concatenate((X_hist[:, 1: ], Y[width:-width].T), 0)
        RRi = pinv(dot(R, R.T))
        RL  = dot(R, L.T)
        Sys = dot(RRi, RL).T
        self.A = Sys[:statedim, :statedim]
        self.B = Sys[:statedim, statedim:]
        self.C = Sys[statedim:, :statedim]
        self.D = Sys[statedim:, statedim:]
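The constructor above relies on flat numpy names plus a block-matrix builder that accepts None blocks and a sparse eigensolver; a plausible import header (an assumption, reconstructed from the calls) is:

from numpy import shape, size, reshape, array, ravel, dot, eye, concatenate
from numpy.linalg import pinv
from scipy.sparse import bmat          # bmat fills None blocks with zeros
from scipy.sparse.linalg import eigs   # eigs(..., k=statedim, which='SR')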
Example #22
File: actorcritic.py Project: hbhzwj/librl
 def _updateWeights(self, lastobs, lastaction, lastreward, obs, action,
                    reward):
     """Update weights of Critic and Actor based on the (state, action, reward) pair for
     current time and last time"""
     lastfeature = self.module.activate(scipy.concatenate((lastobs, lastaction)))
     feature = self.module.activate(scipy.concatenate((obs, action)))
     self.critic(lastreward, lastfeature, reward, feature)
     self.actor(lastobs, lastaction, lastfeature)
Example #23
def integrate_sens_subset(net, times, rtol=None,
                          fill_traj=False, opt_vars=None,
                          return_derivs=False, redirect_msgs=True):
    """
    Integrate the sensitivity equations for a list of optimizable variables.
    """
    rtol, atol = generate_tolerances(net, rtol)
    # We integrate the net once just to know where all our events are
    traj = integrate(net, times, rtol=rtol, fill_traj=fill_traj, 
                     return_events=False, return_derivs=return_derivs,
                     redirect_msgs=redirect_msgs)

    N_dyn_vars = len(net.dynamicVars)
    # Create the array that will hold all our sensitivities
    out_size = (len(traj.get_times()), N_dyn_vars + N_dyn_vars*len(opt_vars))
    all_yout = scipy.zeros(out_size, scipy.float_)

    # Copy the values from the trajectory into this array
    for ii, dyn_var in enumerate(net.dynamicVars.keys()):
        all_yout[:, ii] = traj.get_var_traj(dyn_var)

    # Similarly for the derivative outputs
    if return_derivs:
        all_youtdt = scipy.zeros(out_size, scipy.float_)
        for ii, dyn_var in enumerate(net.dynamicVars.keys()):
            all_youtdt[:, ii] = traj.get_var_traj((dyn_var, 'time'))

    # Now we integrate one optimizable variable at a time
    for ii, opt_var in enumerate(opt_vars):
        single_out = integrate_sens_single(net, traj, rtol, opt_var,
                                           return_derivs, redirect_msgs)
        # Copy the result into our main arrays.
        start_col = (ii+1) * N_dyn_vars
        end_col = (ii+2) * N_dyn_vars
        if not return_derivs:
            all_yout[:, start_col:end_col] = single_out[0]
        else:
            all_yout[:, start_col:end_col] = single_out[0]
            all_youtdt[:, start_col:end_col] = single_out[1]
        # Concatenate the various 'events_occurred'
        for eii,e in enumerate(traj.events_occurred):
            if not hasattr(e, 'time_exec'):
                # This is an event that fired but never executed. We can ignore
                # it since it doesn't affect the dynamics.
                continue
            if not hasattr(e, 'ysens_fired'):
                e.ysens_fired = single_out[-1][eii].ysens_fired
                e.ysens_post_exec = single_out[-1][eii].ysens_post_exec
                e.ysens_pre_exec = single_out[-1][eii].ysens_pre_exec
            else:
                e.ysens_fired = scipy.concatenate((e.ysens_fired, single_out[-1][eii].ysens_fired[N_dyn_vars:]))
                e.ysens_post_exec = scipy.concatenate((e.ysens_post_exec, single_out[-1][eii].ysens_post_exec[N_dyn_vars:]))
                e.ysens_pre_exec = scipy.concatenate((e.ysens_pre_exec, single_out[-1][eii].ysens_pre_exec[N_dyn_vars:]))

    if not return_derivs:
        return traj.get_times(), all_yout, traj.event_info, traj.events_occurred
    else:
        return traj.get_times(), all_yout, all_youtdt, traj.event_info, traj.events_occurred
Example #24
  def read_ppm(self, rawfilename, filename):
    # This function reads the ppm/jpg file and extracts the features if the
    # features pkl file doesn't exist. It also supports extending the feature
    # vector and doesn't recompute features that already exist.

    new_feature_string = []
    updated_feature = 0
    data = N.array([], dtype=int)
    if os.path.exists(filename):
      pkl_f = open(filename, 'r')
      (data, labels, feature_string, width, height, winsize, nbins)= pickle.load(pkl_f)
      self.winsize = winsize
      self.nbins = nbins
      new_feature_string = list(feature_string)
      pkl_f.close()      

    if not new_feature_string.count('dsift'):
      updated_feature = 1
      (sift_features, labels, width, height) = self.extract_dsift(rawfilename, self.winsize, self.nbins)
      if data.size:
        data = scipy.concatenate((data.transpose(), sift_features.transpose()), 1).transpose()
      else:
        data = sift_features
      new_feature_string.append('dsift')

    if not new_feature_string.count('histogram'):
      updated_feature = 1 
      (hist_features, labels, width, height) = self.extract_hist(rawfilename, self.winsize, self.nbins)
      hist_features = hist_features/(self.winsize)
      if data.size:
        data = scipy.concatenate((data.transpose(), hist_features.transpose()), 1).transpose()
      else:
        data = hist_features
      new_feature_string.append('histogram')

    '''
    if not new_feature_string.count('position'):
      updated_feature = 1 
      
      position_features = []
      for label in labels:
        (y,x) = map(int, label.strip('()').split(','))
        position_features.append([x,y]) 
      position_features = N.array(position_features)
    
      if data.size:
        data = scipy.concatenate((data.transpose(), position_features), 1).transpose()
      else:
        data = position_features
      new_feature_string.append('position')
    '''
    if updated_feature:
      outf = open(filename, 'w')
      pickle.dump((data, labels, new_feature_string, width, height, self.winsize, self.nbins),outf)
      outf.close()
      print 'Saved data to %s.' % filename
    
    return (data, labels, new_feature_string, width, height, self.winsize, self.nbins)
Example #25
    def _buildTraitCovar(self,trait_covar_type='lowrank_diag',rank=1,fixed_trait_covar=None,jitter=1e-4):
        """
        Internal function that builds the trait covariance matrix using the LIMIX framework

        Args:
            trait_covar_type: type of covariance to use. Default 'lowrank_diag'. Possible values are 'freeform', 'fixed', 'diag', 'lowrank', 'lowrank_id', 'lowrank_diag', 'block', 'block_id' and 'block_diag'
            rank:       rank of a possible lowrank component (default 1)
            fixed_trait_covar:   PxP matrix for the (predefined) trait-to-trait covariance matrix if fixed type is used
            jitter:        diagonal contribution added to trait-to-trait covariance matrices for regularization
        Returns:
            LIMIX::PCovarianceFunction for Trait covariance matrix
            vector labelling Cholesky parameters for different initializations
        """
        cov = limix.CSumCF()
        if trait_covar_type=='freeform':
            cov.addCovariance(limix.CFreeFormCF(self.P))
            L = sp.eye(self.P)
            diag = sp.concatenate([L[i,:(i+1)] for i in range(self.P)])
        elif trait_covar_type=='fixed':
            assert fixed_trait_covar is not None, 'VarianceDecomposition:: set fixed_trait_covar'
            assert fixed_trait_covar.shape[0]==self.P, 'VarianceDecomposition:: Incompatible shape for fixed_trait_covar'
            assert fixed_trait_covar.shape[1]==self.P, 'VarianceDecomposition:: Incompatible shape for fixed_trait_covar'
            cov.addCovariance(limix.CFixedCF(fixed_trait_covar))
            diag = sp.zeros(1)
        elif trait_covar_type=='diag':
            cov.addCovariance(limix.CDiagonalCF(self.P))
            diag = sp.ones(self.P)
        elif trait_covar_type=='lowrank':
            cov.addCovariance(limix.CLowRankCF(self.P,rank))
            diag = sp.zeros(self.P*rank)
        elif trait_covar_type=='lowrank_id':
            cov.addCovariance(limix.CLowRankCF(self.P,rank))
            cov.addCovariance(limix.CFixedCF(sp.eye(self.P)))
            diag = sp.concatenate([sp.zeros(self.P*rank),sp.ones(1)])
        elif trait_covar_type=='lowrank_diag':
            cov.addCovariance(limix.CLowRankCF(self.P,rank))
            cov.addCovariance(limix.CDiagonalCF(self.P))
            diag = sp.concatenate([sp.zeros(self.P*rank),sp.ones(self.P)])
        elif trait_covar_type=='block':
            cov.addCovariance(limix.CFixedCF(sp.ones((self.P,self.P))))
            diag = sp.zeros(1)
        elif trait_covar_type=='block_id':
            cov.addCovariance(limix.CFixedCF(sp.ones((self.P,self.P))))
            cov.addCovariance(limix.CFixedCF(sp.eye(self.P)))
            diag = sp.concatenate([sp.zeros(1),sp.ones(1)])
        elif trait_covar_type=='block_diag':
            cov.addCovariance(limix.CFixedCF(sp.ones((self.P,self.P))))
            cov.addCovariance(limix.CDiagonalCF(self.P))
            diag = sp.concatenate([sp.zeros(1),sp.ones(self.P)])
        else:
            assert False, 'VarianceDecomposition:: trait_covar_type not valid'
        if jitter>0:
            _cov = limix.CFixedCF(sp.eye(self.P))
            _cov.setParams(sp.array([sp.sqrt(jitter)]))
            _cov.setParamMask(sp.zeros(1))
            cov.addCovariance(_cov)
        return cov,diag
Example #26
def __resample_to_circle(f, R):
	s1 = concatenate((__theta_samples[R] - 2*pi, __theta_samples[R], __theta_samples[R] + 2*pi))
	ip = interp1d(s1,  concatenate((f, f, f)))
	res = zeros((8*R+4),f.dtype)
	for k in range(8*R + 4):
		theta = k * 2. * pi / float(8*R + 4)
		v = ip(theta)
		res[k] = v
	return res
Example #27
File: ann_test.py Project: baxton-sg/ANN
def train(ds1, ds2, ds3):
    X = sp.concatenate((ds1, ds2, ds3), axis=0)
    #Y = sp.concatenate(( [[1,0,0]]*ds1.shape[0], [[0,1,0]]*ds2.shape[0], [[0,0,1]]*ds3.shape[0] ))
    Y = sp.concatenate(( [0]*ds1.shape[0], [1]*ds2.shape[0], [2]*ds3.shape[0] ))

    ann = RandomForestClassifier()
    ann.fit(X, Y)

    return ann
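Usage sketch. The snippet's `sp` is scipy (numpy stands in below), and `RandomForestClassifier` comes from `sklearn.ensemble`; note the variable is named `ann` but it is a random forest, not a neural net:

import numpy as sp
from sklearn.ensemble import RandomForestClassifier
ds1 = sp.random.randn(30, 4)        # three synthetic classes
ds2 = sp.random.randn(30, 4) + 2.0
ds3 = sp.random.randn(30, 4) - 2.0
model = train(ds1, ds2, ds3)
model.predict(sp.zeros((1, 4)))     # most likely class 0 (the cluster around the origin)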
Example #28
    def set_data_by_xy_data(self, x1, x2, y1, y2):
        #not_missing = (numpy.isfinite(x1) * numpy.isfinite(x2) * numpy.isfinite(y1) * numpy.isfinite(y2)).flatten()
        #x1, x2 = x1[not_missing], x2[not_missing]
        #y1, y2 = y1[not_missing], y2[not_missing]

        X = numpy.array([x1, x2]); Y = numpy.array([y1, y2])
        # set individual model's data
        self._models[individual_id].setData(X, Y)
        # set common model's data
        self._models[common_id].setData(scipy.concatenate(X), scipy.concatenate(Y))
Example #29
        def updateGP():
            """update the GP datasets and re-evaluate the Ep approximate likelihood"""
            #0. update the noise level in accordance with the responsibilities
            for t in range(T):
                XS[:,t:R*T:T,-1] = 1/(Z[1,t]+1E-6)
                XJ[:,t:R*T:T,-1] = 1/(Z[0,t]+1E-6)

            GPS.setData(XS,Y)
            #here we join the two conditions
            GPJ.setData(S.concatenate(XJ,axis=0),S.concatenate(Y,axis=0))
Example #30
File: parlib.py Project: pokornyv/SPEpy
def XIntegralsFFT(GF_A,Bubble_A,Lambda,BubZero):
	''' calculate X integral to susceptibilities using FFT '''
	N = int((len(En_A)-1)/2)
	Kappa_A  = TwoParticleBubble(GF_A,GF_A**2,'eh')
	Bubble_A = TwoParticleBubble(GF_A,GF_A,'eh')
	#print(Kappa_A[N],Bubble_A[N])
	V_A   = 1.0/(1.0+Lambda*Bubble_A)
	KV_A  = Lambda*Kappa_A*V_A**2
	KmV_A = Lambda*sp.flipud(sp.conj(Kappa_A))*V_A**2
	## zero-padding the arrays
	exFD_A  = sp.concatenate([FD_A[N:],sp.zeros(2*N+2),FD_A[:N+1]])
	ImGF_A  = sp.concatenate([sp.imag(GF_A[N:]),sp.zeros(2*N+2),sp.imag(GF_A[:N+1])])
	ImGF2_A = sp.concatenate([sp.imag(GF_A[N:]**2),sp.zeros(2*N+2),sp.imag(GF_A[:N+1]**2)])
	ImV_A   = sp.concatenate([sp.imag(V_A[N:]),sp.zeros(2*N+2),sp.imag(V_A[:N+1])])
	ImKV_A  = sp.concatenate([sp.imag(KV_A[N:]),sp.zeros(2*N+2),sp.imag(KV_A[:N+1])])
	ImKmV_A = sp.concatenate([sp.imag(KmV_A[N:]),sp.zeros(2*N+2),sp.imag(KmV_A[:N+1])])
	## performing the convolution
	ftImX11_A = -sp.conj(fft(exFD_A*ImV_A))*fft(ImGF2_A)*dE
	ftImX12_A =  fft(exFD_A*ImGF2_A)*sp.conj(fft(ImV_A))*dE
	ftImX21_A = -sp.conj(fft(exFD_A*ImKV_A))*fft(ImGF_A)*dE
	ftImX22_A =  fft(exFD_A*ImGF_A)*sp.conj(fft(ImKV_A))*dE
	ftImX31_A = -sp.conj(fft(exFD_A*ImKmV_A))*fft(ImGF_A)*dE
	ftImX32_A =  fft(exFD_A*ImGF_A)*sp.conj(fft(ImKmV_A))*dE
	## inverse transform
	ImX1_A =  sp.real(ifft(ftImX11_A+ftImX12_A))/sp.pi
	ImX2_A =  sp.real(ifft(ftImX21_A+ftImX22_A))/sp.pi
	ImX3_A = -sp.real(ifft(ftImX31_A+ftImX32_A))/sp.pi
	ImX1_A =  sp.concatenate([ImX1_A[3*N+4:],ImX1_A[:N+1]])
	ImX2_A =  sp.concatenate([ImX2_A[3*N+4:],ImX2_A[:N+1]])
	ImX3_A =  sp.concatenate([ImX3_A[3*N+4:],ImX3_A[:N+1]])
	## getting real part from imaginary
	X1_A = KramersKronigFFT(ImX1_A) + 1.0j*ImX1_A + BubZero # constant part !!!
	X2_A = KramersKronigFFT(ImX2_A) + 1.0j*ImX2_A
	X3_A = KramersKronigFFT(ImX3_A) + 1.0j*ImX3_A
	return [X1_A,X2_A,X3_A]
Example #31
File: combinators.py Project: wqren/pygp
 def get_hyperparameter_names(self):
     """return the names of hyperparameters to make identificatio neasier"""
     return sp.concatenate((self.covar.get_hyperparameter_names(), [
         "Time-Shift rep%i" % (i) for i in sp.unique(self.replicate_indices)
     ]))
Example #32
    nc = t.size
    rp =  sp.random.permutation(nc)
    xt.extend(X[t[rp[0:nt]],:])
    yt.extend(y[t[rp[0:nt]]])

xt = sp.asarray(xt)
yt = sp.asarray(yt)

# Do FFFS
maxVar = 12
model = npfs.GMMFeaturesSelection()
model.learn_gmm(xt,yt)
idx, crit, [] = model.selection('forward',xt, yt,criterion='kappa', varNb=maxVar, nfold=5)

for i in range(maxVar):
    print "({0},{1})".format(wave[idx[i]],crit[i])

for i in range(maxVar):
    print "({0},{1})".format(i+1,crit[i])

# Save selected feature
D = sp.copy(model.mean[0,idx[:2]][:,sp.newaxis])

for i in xrange(1,9):
    D = sp.concatenate((D,model.mean[i,idx[:2]][:,sp.newaxis]),axis=1)

D = D.T
C = sp.arange(1,10)
D = sp.concatenate((D,C[:,sp.newaxis]),axis=1)
sp.savetxt("../FeatureExtraction/figures/fffsMean.csv",D,delimiter=',')
Example #33
    if 0:
        K = SP.eye(100)
        X = SP.random.randn(100,1000)
        C = SP.ones([100,1])
        I = 1.0*(SP.random.rand(100,2)<0.2)
        I0 = 1.0*SP.ones([100,1])
        
        y = 0.2*(I[:,0:1]*X[:,333:333+1]) 
        y/=y.std()
        y+= 0.2*SP.random.randn(y.shape[0],y.shape[1])
        lm = limix.CInteractLMM()
        lm.setTestStatistics(limix.CLMM.TEST_F)
        lm.setK(K)
        lm.setSNPs(X)
        lm.setPheno(y)
        lm.setCovs(SP.concatenate((C,I[:,0:1]),axis=1))
        lm.setInter(I[:,0:1])
        lm.setInter0(I0)

        pdb.set_trace()
        lm.process()
        pv1_llr = lm.getPv()
            
        PL.plot(-SP.log(pv1_llr).ravel())
        pdb.set_trace()
        pass
    
    
    hd = h5py.File('/kyb/agbs/stegle/work/projects/warpedlmm/data/Nordborg_data.h5py','r')
    geno = hd['geno']
    pheno = hd['pheno']
Example #34
#for i in range(len(amps_t)):
i = 0
corr = (advanced_calc.compute(zs_f[i], -1.) -
        d_cluster) / advanced_calc.compute(zs_f[i], -1.) / base
amps_f.append(amps_t[i] / corr)
ampErrs_f.append(ampErrs_t[i] / corr)
i = 1
corr = (advanced_calc.compute(zs_f[i], -1.) -
        d_cluster) / advanced_calc.compute(zs_f[i], -1.) / base
ampErrs_f.append(
    ((ampErrs_t[0] * corr)**2. + (ampErrs_t[i])**2.)**0.5 / amps[i])
amps_f.append(amps_t[i] / corr)

if False:
    amps_f = scipy.concatenate((amps_f, [0.9]))
    ampErrs_f = scipy.concatenate((ampErrs_f, [0.1]))
    zs_f = scipy.concatenate((zs_f, [3.]))

print amps_f, ampErrs_f, zs_f

print len(zs_f)
import shear_ratio, advanced_calc
for w, color in []:  #[[-1.,'orange'],[-0.5,'red'],[-1.5,'blue'],[0,'black']]:
    p = fit_mass(zs_f[:],
                 scipy.array(amps_f)[:],
                 scipy.array(ampErrs_f)[:], cluster_z, w)
    d_cluster = advanced_calc.compute(cluster_z, w)

    import scipy
    ratios = []
Example #35
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import f1_score

# Load data set
im,GeoT,Proj = rt.open_data('../Data/university.tif')
[h,w,b]=im.shape
im.shape=(h*w,b)

# Compute the morphological profile
pca = PCA(n_components=3)
pcs = pca.fit_transform(im)
EMP = []
for i in xrange(3):
    EMP.append(morphological_profile(pcs[:,i].reshape(h,w),step=1,no=10))
EMP = sp.concatenate(EMP,axis=2)
EMP.shape=(h*w,EMP.shape[2])
del pcs

# Concatenate the spectral and spatial features and do scaling
IM_EMP = sp.concatenate((im[:,::2],EMP.astype(im.dtype)),axis=1)

del im,EMP

# Save the results
rt.write_data("../Data/fusion_inputs_university.tif",IM_EMP.reshape(h,w,IM_EMP.shape[1]),GeoT,Proj)

# Get the training set
X,y=rt.get_samples_from_roi('../Data/fusion_inputs_university.tif','../Data/university_gt.tif')

# Scale the data
Example #36
def projection(dt, people, contacts, Vd, dmin = 0.0, \
               nb_iter_max = 100000, rho=0.1, tol = 0.01, log=False, method="cvxopt"):
    """
    From the desired velocities Vd, this projection step consists of computing \
    the global velocity field defined as the closest velocity to the \
    desired one among all the feasible fields (i.e. fields which do not lead \
    to disks overlapping).

    Parameters
    ----------
    dt: float
        time step
    people: numpy array
        people coordinates and radius : x,y,r
    contacts: numpy array
        all the contacts : i,j,dij,eij_x,eij_y
    Vd: numpy array
        people desired velocities
    dmin: float
        minimum distance guaranteed between individuals
    nb_iter_max: integer
        maximum number of iterations allowed
    rho: float
        parameter of the Uzawa method
    tol: float
        tolerance wished
    log: boolean
        to print the final accuracy, number of iterations,...
    method: string
        optimization algorithm : 'cvxopt' (default) or 'uzawa' (or 'mosek' if installed \
        with a license file).

    Returns
    -------
    B: numpy array
        constraint matrix
    U: numpy array
        new people velocities ensuring that there is no overlap \
        between individuals
    L: numpy array
        Lagrange multipliers (only when method='uzawa', None otherwise)
    P: numpy array
        pressure on each individual (only when method='uzawa', None otherwise)
    info: integer
        number of iterations needed
    """
    Np = people.shape[0]
    Nc = contacts.shape[0]
    info = 0
    if (Nc == 0):
        info = 1
        return info, None, Vd, None, None
    else:

        if (method == "cvxopt") or (method == "mosek"):

            import cvxopt
            cvxopt.solvers.options['show_progress'] = False
            cvxopt.solvers.maxiters = 1000
            cvxopt.solvers.abstol = 1e-8
            cvxopt.solvers.reltol = 1e-7
            L = None
            P = None
            U = sp.zeros((2 * Np, ))
            V = sp.zeros((2 * Np, ))
            Z = (contacts[:, 2] - dmin) / dt  ## ie Dij/dt
            V[::2] = Vd[:, 0]
            V[1::2] = Vd[:, 1]  ## A priori velocity
            V = cvxopt.matrix(V)
            Z = cvxopt.matrix(Z, (Nc, 1))
            Id = cvxopt.spdiag([1] * (U.shape[0]))
            if (Nc > 0):
                II = contacts[:, 0].astype(int)
                JJ = contacts[:, 1].astype(int)
                Jpos = sp.where(JJ >= 0)[0]
                Jneg = sp.where(JJ < 0)[0]
                row = sp.concatenate([Jpos, Jpos, Jpos, Jpos, Jneg, Jneg])
                col = sp.concatenate([
                    2 * II[Jpos], 2 * II[Jpos] + 1, 2 * JJ[Jpos],
                    2 * JJ[Jpos] + 1, 2 * II[Jneg], 2 * II[Jneg] + 1
                ])
                data = sp.concatenate([
                    contacts[Jpos, 3], contacts[Jpos, 4], -contacts[Jpos, 3],
                    -contacts[Jpos, 4], -contacts[Jneg, 3], -contacts[Jneg, 4]
                ])
                B = csr_matrix((data, (row, col)),
                               shape=(Nc, 2 * Np))  #.toarray()
                cvxoptB = cvxopt.spmatrix(sp.array(data),
                                          sp.array(row),
                                          sp.array(col),
                                          size=(Nc, 2 * Np))
                if (method == "mosek"):
                    from mosek import iparam
                    cvxopt.solvers.options['mosek'] = {iparam.log: 0}
                    solution = cvxopt.solvers.qp(Id,
                                                 -V,
                                                 cvxoptB,
                                                 Z,
                                                 solver='mosek')
                else:
                    solution = cvxopt.solvers.qp(Id, -V, cvxoptB, Z)
                    info = solution["iterations"]
            U = solution['x']
            if log:
                C = Z - B @ U
                if (method == "mosek"):
                    print("    projection (mosek) : nb of contacts = ", Nc,
                          ", contrainte (Z-B@U).min() = ", C.min())
                else:
                    print("    projection (cvxopt) : nb of contacts = ", Nc,
                          ", nb of iterations = ", solution["iterations"],
                          ", status = ", solution["status"],
                          ", contrainte (Z-B@U).min() = ", C.min())
            U = sp.array(U).reshape((Np, 2))

        elif (method == "uzawa"):

            info = 0
            II = contacts[:, 0].astype(int)
            JJ = contacts[:, 1].astype(int)
            Jpos = sp.where(JJ >= 0)[0]
            Jneg = sp.where(JJ < 0)[0]
            row = sp.concatenate([Jpos, Jpos, Jpos, Jpos, Jneg, Jneg])
            col = sp.concatenate([
                2 * II[Jpos], 2 * II[Jpos] + 1, 2 * JJ[Jpos], 2 * JJ[Jpos] + 1,
                2 * II[Jneg], 2 * II[Jneg] + 1
            ])
            data = sp.concatenate([
                contacts[Jpos, 3], contacts[Jpos, 4], -contacts[Jpos, 3],
                -contacts[Jpos, 4], -contacts[Jneg, 3], -contacts[Jneg, 4]
            ])
            B = csr_matrix((data, (row, col)), shape=(Nc, 2 * Np))  #.toarray()
            L = sp.zeros((Nc, ))
            R = 99 * sp.ones((Nc, ))
            U = sp.zeros((2 * Np, ))
            V = sp.zeros((2 * Np, ))
            D = contacts[:, 2]
            V[::2] = Vd[:, 0]
            V[1::2] = Vd[:, 1]
            k = 0
            while ((dt * R.max() > tol * 2 * people[:, 2].min())
                   and (k < nb_iter_max)):
                U[:] = V[:] - B.transpose() @ L[:]
                R[:] = B @ U[:] - (D[:] - dmin) / dt
                L[:] = sp.maximum(L[:] + rho * R[:], 0)
                k += 1
            P = sp.zeros(Np)  ## Pressure
            P[II[Jpos]] += 3 / (4 * sp.pi * people[II[Jpos], 2]**2) * L[Jpos]
            P[JJ[Jpos]] += 3 / (4 * sp.pi * people[JJ[Jpos], 2]**2) * L[Jpos]
            P[II[Jneg]] += 3 / (4 * sp.pi * people[II[Jneg], 2]**2) * L[Jneg]
            if log:
                print("    projection (uzawa) : nb of contacts = ", Nc,
                      ", nb of iterations = ", k, ", min = ", R.min(),
                      ", max = ", R.max(), ", tol = ", tol)
            if (k == nb_iter_max):
                print("** WARNING : Method projection **")
                print(
                    "** WARNING : you have reached the maximum number of iterations,"
                )
                print("** WARNING : it remains unsatisfied constraints !! ")
                info = -1
            else:
                info = k

        return info, B, U.reshape((Np, 2)), L, P
Example #37
    def __call__(self, Xi, Xj, ni, nj, hyper_deriv=None, symmetric=False):
        """Evaluate the covariance between points `Xi` and `Xj` with derivative order `ni`, `nj`.
        
        Parameters
        ----------
        Xi : :py:class:`Matrix` or other Array-like, (`M`, `D`)
            `M` inputs with dimension `D`.
        Xj : :py:class:`Matrix` or other Array-like, (`M`, `D`)
            `M` inputs with dimension `D`.
        ni : :py:class:`Matrix` or other Array-like, (`M`, `D`)
            `M` derivative orders for set `i`.
        nj : :py:class:`Matrix` or other Array-like, (`M`, `D`)
            `M` derivative orders for set `j`.
        hyper_deriv : Non-negative int or None, optional
            The index of the hyperparameter to compute the first derivative
            with respect to. If None, no derivatives are taken. Hyperparameter
            derivatives are not supported at this point. Default is None.
        symmetric : bool, optional
            Whether or not the input `Xi`, `Xj` are from a symmetric matrix.
            Default is False.
        
        Returns
        -------
        Kij : :py:class:`Array`, (`M`,)
            Covariances for each of the `M` `Xi`, `Xj` pairs.
        
        Raises
        ------
        NotImplementedError
            If the `hyper_deriv` keyword is not None.
        """
        if hyper_deriv is not None:
            raise NotImplementedError(
                "Hyperparameter derivatives have not been implemented!")
        n_cat = scipy.asarray(scipy.concatenate((ni, nj), axis=1), dtype=int)
        X_cat = scipy.asarray(scipy.concatenate((Xi, Xj), axis=1), dtype=float)
        n_cat_unique = unique_rows(n_cat)
        k = scipy.zeros(Xi.shape[0], dtype=float)
        # Loop over unique derivative patterns:
        if self.num_proc > 1:
            pool = multiprocessing.Pool(processes=self.num_proc)
        for n_cat_state in n_cat_unique:
            idxs = scipy.where(
                scipy.asarray((n_cat == n_cat_state).all(axis=1)).squeeze())[0]
            if (n_cat_state == 0).all():
                k[idxs] = self.cov_func(Xi[idxs, :], Xj[idxs, :], *self.params)
            else:
                if self.num_proc > 1 and len(idxs) > 1:
                    k[idxs] = scipy.asarray(pool.map(
                        _ArbitraryKernelEval(self, n_cat_state),
                        X_cat[idxs, :]),
                                            dtype=float)
                else:
                    for idx in idxs:
                        k[idx] = mpmath.chop(
                            mpmath.diff(self._mask_cov_func,
                                        X_cat[idx, :],
                                        n=n_cat_state,
                                        singular=True))

        if self.num_proc > 0:
            pool.close()
        return k
Example #38
 def params(self):
     return scipy.concatenate((self.k1.params, self.k2.params))
Example #39
 def fixed_params(self):
     return scipy.concatenate((self.k1.fixed_params, self.k2.fixed_params))
Example #40
vtongue1_3 = vrest/s[:, None]

vrest = scipy.io.loadmat('/big_disk/ajoshi/with_andrew/100307/100307.\
tfMRI_LANGUAGE_RL.reduce3.ftdata.NLM_11N_hvar_5.mat')
LR_flag = msk['LR_flag']
LR_flag = np.squeeze(LR_flag) > 0
data = vrest['ftdata_NLM']
vrest = data[LR_flag]
vrest = vrest[ind_rois, ]
m = np.mean(vrest, 1)
vrest = vrest - m[:, None]
s = np.std(vrest, 1)+1e-116
vtongue1_4 = vrest/s[:, None]


vtongue1 = sp.concatenate((vtongue1_1, vtongue1_2, vtongue1_3, vtongue1_4), axis=1)
#vtongue1 = sp.concatenate((vtongue1_1, vtongue1_2), axis=1)


#vtongue1 = vtongue1[ind_rois,]
#vrest = nib.load('/big_disk/ajoshi/HCP5/' + sub + '/MNINonLinear/Resu\
#lts/rfMRI_REST2_LR/rfMRI_REST2_LR_Atlas_hp2000_clean.dtseries.nii')
vrest = scipy.io.loadmat('/big_disk/ajoshi/with_andrew/100307/100307.\
rfMRI_REST1_LR.reduce3.ftdata.NLM_11N_hvar_5.mat')
LR_flag = msk['LR_flag']
LR_flag = np.squeeze(LR_flag) > 0
data = vrest['ftdata_NLM']
#data = sp.squeeze(vrest.get_data()).T
vrest = data[LR_flag]
vrest = vrest[ind_rois, ]
vrest = vrest[:, :vtongue1.shape[1]]    # make their length same
Example #41
def noise(shape: List[int],
          porosity=None,
          octaves: int = 3,
          frequency: int = 32,
          mode: str = 'simplex'):
    r"""
    Generate a field of spatially correlated random noise using the Perlin
    noise algorithm, or the updated Simplex noise algorithm.

    Parameters
    ----------
    shape : array_like
        The size of the image to generate in [Nx, Ny, Nz] where N is the
        number of voxels.

    porosity : float
        If specified, this will threshold the image to the specified value
        prior to returning.  If no value is given (the default), then the
        scalar noise field is returned.

    octaves : int
        Controls the *texture* of the noise, with higher octaves giving more
        complex features over larger length scales.

    frequency : array_like
        Controls the relative sizes of the features, with higher frequencies
        giving larger features.  A scalar value will apply the same frequency
        in all directions, giving an isotropic field; a vector value will
        apply the specified values along each axis to create anisotropy.

    mode : string
        Which noise algorithm to use, either \'simplex\' (default) or
        \'perlin\'.

    Returns
    -------
    If porosity is given, then a boolean array with ``True`` values denoting
    the pore space is returned.  If not, then normally distributed and
    spatially correlated random noise is returned.

    Notes
    -----
    This method depends on a package called 'noise' which must be
    compiled. It is included in the Anaconda distribution, or a platform
    specific binary can be downloaded.

    See Also
    --------
    norm_to_uniform

    """
    try:
        import noise
    except ModuleNotFoundError:
        raise Exception("The noise package must be installed")
    shape = sp.array(shape)
    if sp.size(shape) == 1:
        Lx, Ly, Lz = sp.full((3, ), int(shape))
    elif len(shape) == 2:
        Lx, Ly = shape
        Lz = 1
    elif len(shape) == 3:
        Lx, Ly, Lz = shape
    if mode == 'simplex':
        f = noise.snoise3
    else:
        f = noise.pnoise3
    frequency = sp.atleast_1d(frequency)
    if frequency.size == 1:
        freq = sp.full(shape=[
            3,
        ], fill_value=frequency[0])
    elif frequency.size == 2:
        freq = sp.concatenate((frequency, [1]))
    else:
        freq = sp.array(frequency)
    im = sp.zeros(shape=[Lx, Ly, Lz], dtype=float)
    for x in range(Lx):
        for y in range(Ly):
            for z in range(Lz):
                im[x, y, z] = f(x=x / freq[0],
                                y=y / freq[1],
                                z=z / freq[2],
                                octaves=octaves)
    im = im.squeeze()
    if porosity:
        im = norm_to_uniform(im, scale=[0, 1])
        im = im < porosity
    return im
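A hedged usage sketch for the generator above: it builds a small 2D field, then a thresholded binary version. It assumes the 'noise' package is installed and that norm_to_uniform (referenced in See Also) is importable alongside this function.

# Hedged usage sketch; assumes the 'noise' package is installed and that
# norm_to_uniform (see above) is importable alongside this function.
field = noise(shape=[64, 64], frequency=16, octaves=3)
print(field.shape, field.mean())       # 2D scalar noise field

binary = noise(shape=[64, 64], porosity=0.5, frequency=16)
print(binary.dtype, binary.mean())     # boolean image, roughly half True (pore)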
Example #42
0
File: bovy_plot.py Project: ritabanc/galpy
 def transform(self, xy):
     x = xy[:, 0:1]
     y = xy[:, 1:]
     r = sc.sqrt(x * x + y * y)
     theta = sc.arctan2(y, x)
     return sc.concatenate((theta, r), 1)
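For reference, a standalone numpy version of the same Cartesian-to-polar mapping, applied to two sample points:

import numpy as np

def to_polar(xy):
    # each row of xy is a point (x, y); returns columns (theta, r)
    x, y = xy[:, 0:1], xy[:, 1:]
    return np.concatenate((np.arctan2(y, x), np.sqrt(x * x + y * y)), 1)

print(to_polar(np.array([[1.0, 0.0], [0.0, 2.0]])))  # [[0, 1], [pi/2, 2]]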
Example #43
0
def generate_cluster(c, N, var):
    return sp.random.randn(N, 2) * var + sp.tile(c, (N, 1))


# create 2 sets of points for 2 clusters (one training, one testing)
a_mean = [.1, .2]
b_mean = [.3, .5]
std = .1
a_train = generate_cluster(a_mean, 10, std)
b_train = generate_cluster(b_mean, 200, std)

a_test = generate_cluster(a_mean, 20, std)
b_test = generate_cluster(b_mean, 20, std)

xtrain = sp.concatenate((a_train, b_train))
xtest = sp.concatenate((a_test, b_test))
ytrain = sp.concatenate(
    (sp.ones(a_train.shape[0]), sp.zeros(b_train.shape[0]))).reshape((-1, 1))
ytest = sp.concatenate(
    (sp.ones(a_test.shape[0]), sp.zeros(b_test.shape[0]))).reshape((-1, 1))

# plot the data
for data, style in zip([a_train, b_train, a_test, b_test],
                       ['g*', 'rd', 'go', 'rs']):
    pylab.plot(data[:, 0], data[:, 1], style, label='_nolegend_')

r = pylab.gca().get_xlim()

# Generate the nodes that will be trained
nodes = (
Example #44
0
File: mplexp.py Project: kryv/flame_tools
def get_latline(expn, types='all'):
    if types == 'all':
        tls = [
            'EDipole', 'EFocus', 'EQuad', 'HDipole', 'HMono', 'HQuad', 'AccGap'
        ]
    else:
        tls = list(types)

    lat_line = []

    stt = expn.zaxis[0]
    mid = expn.zaxis[-1] / 2
    end = expn.zaxis[-1]

    for etype in tls:
        if etype == 'EDipole':
            ln = np.array(expn.Er12)
        elif etype == 'EFocus':
            ln = np.array(expn.Er21)
        elif etype == 'EQuad':
            ln = np.array(expn.Er23)
        elif etype == 'HDipole':
            ln = np.array(expn.Hp12)
        elif etype == 'HMono':
            ln = np.array(expn.Hp21)
        elif etype == 'HQuad':
            ln = np.array(expn.Hp23)
        elif etype == 'AccGap':
            ln = np.array(expn.ezaxis)
        else:
            raise TypeError('Wrong input type: ' + etype)

        ln = np.around(ln / np.amax(np.absolute(ln)) * 1e7, 1) + 1e-256
        iln = np.sign(ln)
        bln = np.array(np.absolute((iln[:-1] - iln[1:]) / 2), dtype=bool)
        ids = np.arange(len(ln))[bln]
        p0 = expn.zaxis[ids] + np.absolute(ln[ids] /
                                           (ln[ids] - ln[ids + 1])) * expn.dz

        if stt not in p0:
            p0 = np.concatenate([p0, [stt]])

        if mid not in p0:
            p0 = np.concatenate([p0, [mid]])

        if end not in p0:
            p0 = np.concatenate([p0, [end]])

        p0.sort()

        bpls = np.array((p0[1:] - p0[:-1]) > expn.dz * 10, dtype=bool)
        ids2 = np.arange(len(p0))[bpls]
        if len(ids2) != 0:
            ids2 = np.concatenate([ids2, [ids2[-1] + 1]])
            p0 = p0[ids2]

        print(etype, ids, p0)

        nn = 1
        for i in range(len(p0) - 1):
            lat_line.append({
                'name': etype[0:2] + str(nn).zfill(5),
                'type': etype,
                'range': [p0[i], p0[i + 1]]
            })
            nn += 1

    return lat_line
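The boundary detection above hinges on locating sign changes in the sampled field and linearly interpolating the crossing position; here is a self-contained sketch of that step on hypothetical data (the axis and field are made up for illustration).

import numpy as np

z = np.linspace(0.0, 1.0, 11)      # hypothetical axis samples
f = np.sin(2 * np.pi * z)          # hypothetical field
sgn = np.sign(f + 1e-256)          # tiny offset avoids sign(0) == 0
flips = np.array(np.absolute((sgn[:-1] - sgn[1:]) / 2), dtype=bool)
ids = np.arange(len(f))[flips]
dz = z[1] - z[0]
p0 = z[ids] + np.absolute(f[ids] / (f[ids] - f[ids + 1])) * dz
print(p0)                          # interpolated crossing near 0.5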
Example #45
0
def synpitchseq(notes, fs=44100):
    """
    NOTES is a list of tuples (MIDI, T). MIDI is the pitch (in midi notation)
    and T is the duration in seconds.
    """
    return scipy.concatenate([synpitch(midi, T, fs) for (midi, T) in notes])
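synpitch() is not shown in this snippet; a plausible sine-tone stand-in using the standard MIDI-to-frequency conversion f = 440 * 2**((midi - 69) / 12) makes the sequencer runnable. The stand-in is an assumption, not the original implementation.

import numpy as np

def synpitch(midi, T, fs=44100):
    # hypothetical stand-in: a plain sine tone at the MIDI pitch frequency
    f = 440.0 * 2.0 ** ((midi - 69) / 12.0)
    t = np.arange(int(T * fs)) / fs
    return np.sin(2 * np.pi * f * t)

seq = synpitchseq([(60, 0.25), (64, 0.25), (67, 0.25)])  # C4, E4, G4
print(seq.shape)  # (33075,)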
Example #46
0
def plotLine(vector,
             val=1.0,
             close=False,
             tube_radius=None,
             index=None,
             **kwargs):
    """
    plotLine creates a single plot object from a single vector or from an
    n-dimensional tuple or list.
    """
    plot = False
    try:
        x = vector.x()
        temp0 = x[0]
        temp1 = x[1]
        temp2 = x[2]
        s = val * scipy.ones(temp0.shape)

        # For surface objects, this keyword allows for the last corner to connect with the first
        if close:
            temp0 = scipy.concatenate((temp0, scipy.atleast_1d(temp0[0])))
            temp1 = scipy.concatenate((temp1, scipy.atleast_1d(temp1[0])))
            temp2 = scipy.concatenate((temp2, scipy.atleast_1d(temp2[0])))
            s = scipy.concatenate((s, scipy.atleast_1d(s[0])))

        if index is not None:
            N = len(temp0)
            connect = scipy.vstack([
                scipy.arange(index, index + N - 1.5),
                scipy.arange(index + 1, index + N - .5)
            ]).T  # I want to rewrite this...
            index += N

    except AttributeError:

        temp0 = []
        temp1 = []
        temp2 = []
        s = []
        connect = []

        # if it is not some sort of vector or vector-derived class, iterate through and make a surface object
        if index is None:
            index = 0
            plot = True

        for i in vector:
            output = plotLine(i, close=close, index=index, **kwargs)
            temp0 += [output[0]]
            temp1 += [output[1]]
            temp2 += [output[2]]
            s += [output[3]]
            connect += [output[4]]
            index = output[5]

        #turn to arrays here so I don't accidentally nest lists or tuples
        temp0 = scipy.hstack(temp0)
        temp1 = scipy.hstack(temp1)
        temp2 = scipy.hstack(temp2)
        s = scipy.hstack(s)
        connect = scipy.vstack(connect)

    if index is None:

        try:
            mlab.plot3d(temp0,
                        temp1,
                        temp2,
                        s,
                        vmin=0.,
                        vmax=1.,
                        tube_radius=tube_radius,
                        **kwargs)
        except ValueError:
            mlab.plot3d(temp0.flatten(),
                        temp1.flatten(),
                        temp2.flatten(),
                        s.flatten(),
                        vmin=0.,
                        vmax=1.,
                        tube_radius=tube_radius,
                        **kwargs)

    else:
        if plot:
            # follows http://docs.enthought.com/mayavi/mayavi/auto/example_plotting_many_lines.html#example-plotting-many-lines

            src = mlab.pipeline.scalar_scatter(temp0, temp1, temp2, s)
            src.mlab_source.dataset.lines = connect
            lines = mlab.pipeline.stripper(src)
            mlab.pipeline.surface(lines, **kwargs)

        else:
            return (temp0, temp1, temp2, s, connect, index)
Example #47
0
        gp.append([idx, idx + group[i]])
        idx += group[i]
    group = gp

    # Glasso Parameter selection by 5 fold cv
    optmu = muinit
    optmu2 = mu2init
    optcor = 0
    for j1 in range(7):
        for j2 in range(7):
            mu = muinit * (ps_step**j1)
            mu2 = mu2init * (ps_step**j2)
            cor = 0
            for k in range(5):  #5 for full 5 fold CV
                train1_idx = SP.concatenate(
                    (train_idx[:int(n_train * k * 0.2)],
                     train_idx[int(n_train * (k + 1) * 0.2):n_train]))
                valid_idx = train_idx[int(n_train * k *
                                          0.2):int(n_train * (k + 1) * 0.2)]
                res1 = lmm_lasso.train(X[train1_idx],
                                       K[train1_idx][:, train1_idx],
                                       y[train1_idx], mu, mu2, group)
                w1 = res1['weights']
                yhat = lmm_lasso.predict(y[train1_idx], X[train1_idx, :],
                                         X[valid_idx, :],
                                         K[train1_idx][:, train1_idx],
                                         K[valid_idx][:, train1_idx],
                                         res1['ldelta0'], w1)
                cor += SP.dot(
                    yhat.T - yhat.mean(), y[valid_idx] -
                    y[valid_idx].mean()) / (yhat.std() * y[valid_idx].std())
Example #48
0
File: runsim.py Project: jswoboda/SimISR
def fitdata(basedir,configfile,optinputs):
    """ This function will run the fitter on the estimated ACFs saved in h5 files.
        Inputs:
        basedir: A string for the directory that will hold all of the data for the simulation.
        configfile: The configuration file for the simulation.
        optinputs:A string that helps determine the what type of acfs will be fitted.
         """
    # determine the input folders which can be ACFs from the full simulation
    dirdict = {'fitting':('ACF', 'Fitted'), 'fittingmat':('ACFMat', 'FittedMat'),
               'fittinginv':('ACFInv', 'FittedInv'), 'fittingmatinv':('ACFMatInv', 'FittedMatInv')}
    dirio = dirdict[optinputs[0]]
    inputdir = basedir/dirio[0]
    outputdir = basedir/dirio[1]
    fitlist = optinputs[1]
    if len(optinputs) > 2:
        exstr = optinputs[2]
        printlines = optinputs[3]
    else:
        exstr = ''
    dirlist = [str(i) for i in inputdir.glob('*lags{0}.h5'.format(exstr))]
    dirlistsig = [str(i) for i in inputdir.glob('*sigs{0}.h5'.format(exstr))]

    Ionoin = IonoContainer.readh5(dirlist[0])
    if len(dirlistsig) == 0:
        Ionoinsig = None
    else:
        Ionoinsig = IonoContainer.readh5(dirlistsig[0])
    fitterone = Fitterionoconainer(Ionoin, Ionoinsig, configfile)

    fitoutput = fitterone.fitdata(specfuncs.ISRSfitfunction,
                                  fitterone.simparams['startfile'], fittimes=fitlist,
                                  printlines=printlines)

    (fitteddata, fittederror, funcevals, fittedcov) = fitoutput
    if fitterone.simparams['Pulsetype'].lower() == 'barker':
        paramlist = fitteddata

        species = fitterone.simparams['species']
        paramnames = ['Ne']
        if fittederror is not None:
            fittederronly = sp.sqrt(fittederror)
            paramlist = sp.concatenate([fitteddata, fittederronly], axis=2)
            paramnamese = ['n'+ip for ip in paramnames]
            paranamsf = sp.array(paramnames+paramnamese)
        else:
            paranamsf = sp.array(paramnames)
    else:
        fittederronly = sp.sqrt(fittederror)
        paramnames = []
        species = fitterone.simparams['species']
        # Separate Ti and put it in as an element of the ionocontainer.
        Ti = fitteddata[:, :, 1]

        nTi = fittederronly[:, :, 1]

        nTiTe = fittedcov[:, :, 0, 1]
        nTiNe = fittedcov[:, :, 0, 2]
        nTiVi = fittedcov[:, :, 0, 3]
        nTeNe = fittedcov[:, :, 1, 2]
        nTeVi = fittedcov[:, :, 1, 3]
        nNeVi = fittedcov[:, :, 2, 3]
        cov_list = [nTiTe[:, :, sp.newaxis], nTiNe[:, :, sp.newaxis],
                    nTiVi[:, :, sp.newaxis], nTeNe[:, :, sp.newaxis],
                    nTeVi[:, :, sp.newaxis], nNeVi[:, :, sp.newaxis]]
        cov_list_names = ['nTiTe', 'nTiNe', 'nTiVi', 'nTeNe', 'nTeVi','nNeVi']
        paramlist = sp.concatenate([fitteddata, Ti[:, :, sp.newaxis], fittederronly,
                                    nTi[:, :, sp.newaxis], funcevals[:, :, sp.newaxis]]
                                   + cov_list, axis=2)
        for isp in species[:-1]:
            paramnames.append('Ni_'+isp)
            paramnames.append('Ti_'+isp)
        paramnames = paramnames+['Ne', 'Te', 'Vi', 'Nepow', 'Ti']
        paramnamese = ['n'+ip for ip in paramnames]
        paranamsf = sp.array(paramnames+paramnamese+['FuncEvals']+cov_list_names)

    if fitlist is None:
        timevec = Ionoin.Time_Vector
    else:
        if len(fitlist) == 0:
            timevec = Ionoin.Time_Vector
        else:
            timevec = Ionoin.Time_Vector[fitlist]
    # This requires
    if set(Ionoin.Coord_Vecs) == {'x', 'y', 'z'}:
        newver = 0
        ionoout = IonoContainer(Ionoin.Cart_Coords, paramlist.real, timevec, ver=newver,
                                coordvecs=Ionoin.Coord_Vecs, paramnames=paranamsf,
                                species=species)
    elif set(Ionoin.Coord_Vecs) == {'r', 'theta', 'phi'}:
        newver = 1
        ionoout = IonoContainer(Ionoin.Sphere_Coords, paramlist.real, timevec, ver=newver,
                                coordvecs=Ionoin.Coord_Vecs, paramnames=paranamsf,
                                species=species)


    outfile = outputdir.joinpath('fitteddata{0}.h5'.format(exstr))
    ionoout.saveh5(str(outfile))
Example #49
0
File: combinators.py Project: wqren/pygp
 def get_hyperparameter_names(self):
     """return the names of hyperparameters to make identification easier"""
     names = []
     for covar in self.covars:
         names = sp.concatenate((names, covar.get_hyperparameter_names()))
     return names
Example #50
0
    def _PollData(self):

        # for lag measurement
        idx = 0
        start = perf_counter()
        self.timer = {'idx': [], 'elt': []}

        # get stream time
        streamTime = time()

        # clear from last run
        self._recordFlags = {'dataloss': False, 'invTimeStamp': False}

        # check status of device... start if OK
        if self.comPortStatus:

            self.comPort.subscribe(self._recordingDevices)

            # clear old data from polling buffer
            self.comPort.sync()

            while self._poll:

                # lock thread to safely process data
                self._pollLocker.acquire()

                # for lag debugging
                self.timer['idx'].append(idx)
                idx += 1
                self.timer['elt'].append(perf_counter() - start)

                # for lag debugging
                start = perf_counter()

                # fetch data
                # block for 1 ms, timeout 10 ms, throw error if data is lost and return flat dictionary
                # NOTE: poll downloads all data since last poll, sync or subscription
                dataBuf = self.comPort.poll(1e-3, 10, 0x04, True)

                # get all demods in data stream
                for key in dataBuf.keys():

                    # check if demodulator is already in dict, add if not (with standard structure)
                    if key not in self._demods.keys():
                        self._demods.update(
                            {key: self._GetStandardRecordStructure()})

                    # fill structure with new data
                    for k in self._demods[key].keys():
                        if k in dataBuf[key].keys():
                            self._demods[key][k] = sp.concatenate(
                                [self._demods[key][k], dataBuf[key][k]])

                            # save flags for later use in GUI
                            # look at dataloss and invalid time stamps
                            if k in ('dataloss', 'invalidtimestamp') and dataBuf[key][k]:
                                self.logger.warning(
                                    '%s was recognized! Data might be corrupted!'
                                    % k)
                                self._recordFlags[k] = True

########################################################
#   --- THIS IS HERE FOR SIMPLE PLOTTING REASONS ---   #
########################################################

#                    # get data from current demodulator
#                    x = dataBuf[key]['x']
#                    y = dataBuf[key]['y']
#                    # calc abs value from real+imag
#                    r = np.sqrt(x**2 + y**2)
#
#                    # check if demodulator is already in dict, add if not (with standard structure)
#                    if key not in self.demods.keys():
#                        self.demods.update({key: self.GetStandardHf2Dict()})
#
#                        # store first timestamp as a reference, if not available
#                        if self.demods[key]['timeRef'] == -1:
#                            self.demods[key]['timeRef'] = dataBuf[key]['timestamp'][0]
#
#                    # calculate real time with reference and clock base and append to array
#                    self.demods[key]['time'] = np.concatenate([self.demods[key]['time'], (dataBuf[key]['timestamp'] - self.demods[key]['timeRef']) / 210e6])
#
#                    # append data points
#                    self.demods[key]['r'] = np.concatenate([self.demods[key]['r'], r])

# if file size is around 10 MB create a new one
#                if (self.total_size(self.demods) // 1024**2) > (self._maxStrmFlSize-1):
                if (time() - streamTime) / 60 > self.__maxStrmTime__:
                    self.WriteMatFileToDisk()
                    streamTime = time()

                # critical stuff is done, release lock
                self._pollLocker.release()

                # unsubscribe after finished record event
            self.comPort.unsubscribe('*')
Example #51
0
def armorf(x,ntrls,npts,p):
    from numpy import shape, array, matrix, zeros, concatenate, eye, dstack
    from numpy import linalg  # for inverse and Cholesky factorization
    import numpy as np
    inv = linalg.inv  # make the name consistent with MATLAB
    
    # Initialization 
    x = matrix(x)
    [L, N] = shape(x)      # L is the number of channels, N is npts*ntrls
    R0 = R0f = R0b = pf = pb = pfb = ap = bp = En = matrix(zeros((L, L)))  # covariance matrices at lag 0
        
    # calculate the covariance matrix? 
    for i in range(ntrls):
       En=En+x[:,i*npts:(i+1)*npts]*x[:,i*npts:(i+1)*npts].H; 
       ap=ap+x[:,i*npts+1:(i+1)*npts]*x[:,i*npts+1:(i+1)*npts].H;         
       bp=bp+x[:,i*npts:(i+1)*npts-1]*x[:,i*npts:(i+1)*npts-1].H; 
        
    ap = inv((ckchol(ap/ntrls*(npts-1)).T).H); 
    bp = inv((ckchol(bp/ntrls*(npts-1)).T).H); 
        
    for i in range(ntrls): 
       efp = ap*x[:,i*npts+1:(i+1)*npts]; 
       ebp = bp*x[:,i*npts:(i+1)*npts-1]; 
       pf = pf + efp*efp.H; 
       pb = pb + ebp*ebp.H; 
       pfb = pfb + efp*ebp.H; 
  
    En = (ckchol(En/N).T).H;       # Covariance of the noise 
    
    # Initial output variables
    tmp=[]
    for i in range(L): tmp.append([]) # In Matlab, coeff=[], and anything can be appended to that.
    coeff = matrix(tmp);#  Coefficient matrices of the AR model 
    kr = matrix(tmp);  # reflection coefficients 
    aparr=array(ap) # Convert AP matrix to an array, so it can be dstacked
    bparr=array(bp)
    
    for m in range(p): 
      # Calculate the next order reflection (parcor) coefficient 
      ck = inv((ckchol(pf).T).H)*pfb*inv(ckchol(pb).T);  
      kr=concatenate((kr,ck),1); 
      # Update the forward and backward prediction errors 
      ef = eye(L)- ck*ck.H; 
      eb = eye(L)- ck.H*ck; 
        
      # Update the prediction error 
      En = En*(ckchol(ef).T).H;
      E = (ef+eb)/2;

      # Update the coefficients of the forward and backward prediction errors 
      Z=zeros((L,L)) # Make it easier to define this
      aparr=dstack((aparr,Z))
      bparr=dstack((bparr,Z))
      pf = pb = pfb = Z
      # Do some variable juggling to handle Python's array/matrix limitations
      a=b=zeros((L,L,0))

      for i in range(m+2):  
          tmpap1=matrix(aparr[:,:,i]) # Need to convert back to matrix to perform operations
          tmpbp1=matrix(bparr[:,:,i])
          tmpap2=matrix(aparr[:,:,m+1-i])
          tmpbp2=matrix(bparr[:,:,m+1-i])
          tmpa = inv((ckchol(ef).T).H)*(tmpap1-ck*tmpbp2); 
          tmpb = inv((ckchol(eb).T).H)*(tmpbp1-ck.H*tmpap2); 
          a=dstack((a,array(tmpa)))
          b=dstack((b,array(tmpb)))

      for k in range(ntrls):
          efp = zeros((L,npts-m-2)); 
          ebp = zeros((L,npts-m-2)); 
          for i in range(m+2): 
              k1=m+2-i+k*npts; 
              k2=npts-i+k*npts; 
              efp = efp+matrix(a[:,:,i])*matrix(x[:,k1:k2]); 
              ebp = ebp+matrix(b[:,:,m+1-i])*matrix(x[:,k1-1:k2-1]); 
          pf = pf + efp*efp.H; 
          pb = pb + ebp*ebp.H; 
          pfb = pfb + efp*ebp.H; 

      aparr = a; 
      bparr = b; 
    
    for j in range(p):
       coeff = concatenate((coeff,inv(matrix(a[:,:,0]))*matrix(a[:,:,j+1])),1); 

    return coeff, En*En.H, kr
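armorf() depends on a ckchol() helper that is not included in this snippet; judging from its usage it is a Cholesky factorization that tolerates near-singular input. A minimal stand-in under that assumption:

import numpy as np

def ckchol(M):
    # hypothetical stand-in: Cholesky factor, retried with a small diagonal
    # jitter if the matrix is numerically non-positive-definite
    M = np.asarray(M, dtype=float)
    try:
        return np.matrix(np.linalg.cholesky(M))
    except np.linalg.LinAlgError:
        return np.matrix(np.linalg.cholesky(M + 1e-12 * np.eye(M.shape[0])))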
Example #52
0
data = dataref['ftdata_NLM']
sub1 = data[LR_flag, :]
m = np.mean(sub1, 1)
sub1 = sub1 - m[:, None]
s = np.std(sub1, 1) + 1e-16
sub1 = sub1 / (s[:, None] * sp.sqrt(1200))

data = datasub['ftdata_NLM']
sub2 = data[LR_flag, :]
m = np.mean(sub2, 1)
sub2 = sub2 - m[:, None]
s = np.std(sub2, 1) + 1e-16
sub2 = sub2 / (s[:, None] * sp.sqrt(1200))

msk_small_region = np.in1d(dfs_left.labels, roilist)
sub = sp.concatenate((sub1[msk_small_region, :], sub2[msk_small_region, :]),
                     axis=0)
pca = PCA(n_components=3)
pca.fit(sub)

sub2_rot, _ = rot_sub_data(sub1, sub2)

sub1_3d = pca.transform(sub1)
sub2_3d = pca.transform(sub2)
sub2_rot_3d = pca.transform(sub2_rot)

print(sub1.shape)
sub1 = sub1_3d
sub2 = sub2_3d
sub2_rot = sub2_rot_3d
#sub1=sp.random.rand(sub1.shape[0],sub1.shape[1])-.5
#sub2=sp.random.rand(sub2.shape[0],sub2.shape[1])-.5
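A quick check of the normalization used above: after removing the mean and dividing by std * sqrt(T), every row has unit L2 norm, so row dot products are exactly Pearson correlations.

import numpy as np

T = 1200                                   # timepoints, as in the data above
x = np.random.randn(5, T)
x = x - x.mean(1)[:, None]
x = x / (x.std(1)[:, None] * np.sqrt(T))
print(np.linalg.norm(x, axis=1))           # all ~1.0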
Example #53
0
def snow_dual(im, voxel_size=1,
              boundary_faces=['top', 'bottom', 'left', 'right', 'front', 'back'],
              marching_cubes_area=False):

    r"""
    Extracts a dual pore and solid network from a binary image using a modified
    version of the SNOW algorithm

    Parameters
    ----------
    im : ND-array
        Binary image in Boolean form with ``True`` marking the void phase and
        ``False`` the solid phase. The inverted configuration can also be
        processed, but the output phase labelling will be inverted and solid
        phase properties will be assigned to the void phase labels, which
        causes confusion when interpreting the simulation.
    voxel_size : scalar
        The resolution of the image, expressed as the length of one side of a
        voxel, so the volume of a voxel would be **voxel_size**-cubed.  The
        default is 1, which is useful when overlaying the PNM on the original
        image since the scale of the image is always 1 unit length per voxel.
    boundary_faces : list of strings
        Boundary face labels are provided to assign hypothetical boundary
        nodes having zero resistance to transport processes. For cubical
        geometry, the user can choose 'left', 'right', 'top', 'bottom',
        'front' and 'back' face labels to assign boundary nodes. If no label
        is assigned then all six faces will be selected as boundary nodes
        automatically, which can be trimmed later based on user requirements.
    marching_cubes_area : bool
        If ``True`` then the surface area and interfacial area between regions
        will be calculated using the marching cubes algorithm. This is a more
        accurate representation of area in the extracted network, but is quite
        slow, so it is ``False`` by default.  The default method simply counts
        voxels, so it does not correctly account for the voxelated nature of
        the images.

    Returns
    -------
    A dictionary containing all the void and solid phase size data, as well as
    the network topological information.  The dictionary names use the OpenPNM
    convention (i.e. 'pore.coords', 'throat.conns') so it may be converted
    directly to an OpenPNM network object using the ``update`` command.

    References
    ----------
    [1] Gostick, J. "A versatile and efficient network extraction algorithm
    using marker-based watershed segmentation". Phys. Rev. E 96, 023307 (2017)

    [2] Khan, ZA et al. "Dual network extraction algorithm to investigate
    multiple transport processes in porous materials: Image-based modeling
    of pore and grain-scale processes". Computers and Chemical Engineering.
    123(6), 64-77 (2019)

    """
    # -------------------------------------------------------------------------
    # SNOW void phase
    pore_regions = snow_partitioning(im, return_all=True)
    # SNOW solid phase
    solid_regions = snow_partitioning(~im, return_all=True)
    # -------------------------------------------------------------------------
    # Combined Distance transform of two phases.
    pore_dt = pore_regions.dt
    solid_dt = solid_regions.dt
    dt = pore_dt + solid_dt
    pore_peaks = pore_regions.peaks
    solid_peaks = solid_regions.peaks
    peaks = pore_peaks + solid_peaks
    # Calculates combined void and solid regions for dual network extraction
    pore_regions = pore_regions.regions
    solid_regions = solid_regions.regions
    pore_region = pore_regions*im
    solid_region = solid_regions*~im
    solid_num = sp.amax(pore_regions)
    solid_region = solid_region + solid_num
    solid_region = solid_region * ~im
    regions = pore_region + solid_region
    b_num = sp.amax(regions)
    # -------------------------------------------------------------------------
    # Boundary Conditions
    regions = add_boundary_regions(regions=regions, faces=boundary_faces)
    # -------------------------------------------------------------------------
    # Padding distance transform to extract geometrical properties
    f = boundary_faces
    if f is not None:
        if im.ndim == 2:
            faces = [(int('left' in f) * 3, int('right' in f) * 3),
                     (int('front' in f) * 3 or int('bottom' in f) * 3,
                      int('back' in f) * 3 or int('top' in f) * 3)]
        if im.ndim == 3:
            faces = [(int('left' in f) * 3, int('right' in f) * 3),
                     (int('front' in f) * 3, int('back' in f) * 3),
                     (int('top' in f) * 3, int('bottom' in f) * 3)]
        dt = sp.pad(dt, pad_width=faces, mode='edge')
    # -------------------------------------------------------------------------
    # Extract void,solid and throat information from image
    net = regions_to_network(im=regions, dt=dt, voxel_size=voxel_size)
    # -------------------------------------------------------------------------
    # Extract marching cube surface area and interfacial area of regions
    if marching_cubes_area:
        areas = region_surface_areas(regions=regions)
        interface_area = region_interface_areas(regions=regions, areas=areas,
                                                voxel_size=voxel_size)
        net['pore.surface_area'] = areas * voxel_size**2
        net['throat.area'] = interface_area.area
    # -------------------------------------------------------------------------
    # Find void to void, void to solid and solid to solid throat conns
    loc1 = net['throat.conns'][:, 0] < solid_num
    loc2 = net['throat.conns'][:, 1] >= solid_num
    loc3 = net['throat.conns'][:, 1] < b_num
    pore_solid_labels = loc1 * loc2 * loc3

    loc4 = net['throat.conns'][:, 0] >= solid_num
    loc5 = net['throat.conns'][:, 0] < b_num
    solid_solid_labels = loc4 * loc2 * loc5 * loc3

    loc6 = net['throat.conns'][:, 1] < solid_num
    pore_pore_labels = loc1 * loc6

    loc7 = net['throat.conns'][:, 1] >= b_num
    boundary_throat_labels = loc5 * loc7

    solid_labels = ((net['pore.label'] > solid_num) * ~
                    (net['pore.label'] > b_num))
    boundary_labels = net['pore.label'] > b_num
    b_sa = sp.zeros(len(boundary_labels[boundary_labels == 1.0]))
    # -------------------------------------------------------------------------
    # Calculates void interfacial area that connects with solid and vice versa
    p_conns = net['throat.conns'][:, 0][pore_solid_labels]
    ps = net['throat.area'][pore_solid_labels]
    p_sa = sp.bincount(p_conns, ps)
    s_conns = net['throat.conns'][:, 1][pore_solid_labels]
    s_pa = sp.bincount(s_conns, ps)
    s_pa = sp.trim_zeros(s_pa)  # remove pore surface area labels
    p_solid_surf = sp.concatenate((p_sa, s_pa, b_sa))
    # -------------------------------------------------------------------------
    # Calculates interfacial area using marching cube method
    if marching_cubes_area:
        ps_c = net['throat.area'][pore_solid_labels]
        p_sa_c = sp.bincount(p_conns, ps_c)
        s_pa_c = sp.bincount(s_conns, ps_c)
        s_pa_c = sp.trim_zeros(s_pa_c)  # remove pore surface area labels
        p_solid_surf = sp.concatenate((p_sa_c, s_pa_c, b_sa))
    # -------------------------------------------------------------------------
    # Adding additional information of dual network
    net['pore.solid_void_area'] = (p_solid_surf * voxel_size**2)
    net['throat.void'] = pore_pore_labels
    net['throat.interconnect'] = pore_solid_labels
    net['throat.solid'] = solid_solid_labels
    net['throat.boundary'] = boundary_throat_labels
    net['pore.void'] = net['pore.label'] <= solid_num
    net['pore.solid'] = solid_labels
    net['pore.boundary'] = boundary_labels

    class network_dict(dict):
        pass
    net = network_dict(net)
    net.im = im
    net.dt = dt
    net.regions = regions
    net.peaks = peaks
    net.pore_dt = pore_dt
    net.pore_regions = pore_region
    net.pore_peaks = pore_peaks
    net.solid_dt = solid_dt
    net.solid_regions = solid_region
    net.solid_peaks = solid_peaks

    return net
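A hedged usage sketch: fabricate a small binary porous image by thresholding smoothed random noise, then extract the dual network. It assumes the helpers called inside snow_dual (snow_partitioning, add_boundary_regions, regions_to_network, ...) are importable.

import numpy as np
import scipy.ndimage as spim

im = spim.gaussian_filter(np.random.rand(64, 64, 64), sigma=2) > 0.5
net = snow_dual(im, voxel_size=1e-6)
print(net['pore.coords'].shape, net['throat.conns'].shape)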
Example #54
0
def threshold_detection(data, th, min_dist=1, mode='gt', find_max=True):
    """detect events by applying a threshold to the data

    :type data: ndarray
    :param data: the 2d-data to apply the threshold on. channels are in the
        second dimension (columns).
        Required
    :type th: ndarray or list
    :param th: list of threshold values, one value per channel in the `data`
        Required
    :type min_dist: int
    :param min_dist: minimal distance two successive events have to be
        separated in samples, else the event is ignored.
        Default=1
    :type mode: str
    :param mode: one of 'gt' for greater than or 'lt' for less than. will
        determine how the threshold is applied.
        Default='gt'
    :type find_max: bool
    :param find_max: if True, will find the maximum for each event epoch, else
        will find the start for each event epoch.
        Default=True
    :rtype: ndarray
    :returns: event samples
    """

    # checks
    data = sp.asarray(data)
    if data.ndim != 2:
        if data.ndim == 1:
            data = sp.atleast_2d(data).T
        else:
            raise ValueError('data.ndim != 2')
    th = sp.asarray(th)
    if th.ndim != 1:
        raise ValueError('th.ndim != 1')
    if th.size != data.shape[1]:
        raise ValueError('thresholds have to match the data channel count')
    if mode not in ['gt', 'lt']:
        raise ValueError('unknown mode, use one of \'lt\' or \'gt\'')
    if min_dist < 1:
        min_dist = 1

    # inits
    rval = []
    ep_func = {
        'gt': lambda d, t: epochs_from_binvec(d > t).tolist(),
        'lt': lambda d, t: epochs_from_binvec(d < t).tolist(),
    }[mode]

    # per channel detection
    for c in range(data.shape[1]):
        epochs = ep_func(data[:, c], th[c])
        if len(epochs) == 0:
            continue
        for e in range(len(epochs)):
            rval.append(epochs[e][0])
            if find_max is True:
                rval[-1] += data[epochs[e][0]:epochs[e][1] + 1, c].argmax()
    rval = sp.asarray(rval, dtype=INDEX_DTYPE)

    # do we have events?
    if rval.size == 0:
        return rval

    # drop event duplicates by sorting and checking for min_dist
    rval.sort()
    rval = rval[sp.diff(sp.concatenate(([0], rval))) >= min_dist]

    # return
    return rval
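epochs_from_binvec() and INDEX_DTYPE are module-level names not shown in this snippet; a minimal hypothetical version of each, plus a call on synthetic data (using the snippet's old-style scipy alias for sp):

import numpy as np

INDEX_DTYPE = np.int64  # assumed module constant

def epochs_from_binvec(binvec):
    # hypothetical helper: [start, end] index pairs of contiguous True runs
    d = np.diff(np.concatenate(([0], binvec.astype(int), [0])))
    return np.column_stack((np.where(d == 1)[0], np.where(d == -1)[0] - 1))

data = np.zeros((100, 1))
data[[20, 21, 60], 0] = [5.0, 7.0, 6.0]     # two supra-threshold epochs
print(threshold_detection(data, th=[1.0]))  # -> [21 60] (epoch maxima)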
Example #55
0
 def __call__(self, points):
     res = []
     for v in s.arange(self.n):
         p_aug = s.concatenate((points, s.array([v])), axis=0)
         res.append(self.itp(p_aug))
     return res
Example #56
0
File: lmm.py Project: quanrd/GWAS_Pipeline
    def setG(self, G0=None, G1=None, a2=0.0, K0=None, K1=None):
        '''
        set the Kernel (1-a2)*K0 and a2*K1 from G0 and G1.
        This has to be done before setting the data with setX() and setY().

        If k0+k1>>N and similar kernels are used repeatedly, it is beneficial to precompute
        the kernel and pass it as an argument.
        ----------------------------------------------------------------------------
        Input:
        G0              : [N*k0] array of random effects
        G1              : [N*k1] array of random effects (optional)
        a2              : mixture weight between K0=G0*G0^T and K1=G1*G1^T

        K0              : [N*N] array, random effects covariance (positive semi-definite)
        K1              : [N*N] array, random effects covariance (positive semi-definite)(optional)
        -----------------------------------------------------------------------------
        '''
        self.G0 = G0
        self.G1 = G1
        if a2 < 0.0:
            a2 = 0.0
        if a2 > 1.0:
            a2 = 1.0

        if G1 is None and G0 is not None:
            self.G = G0
        elif G0 is not None and G1 is not None:
            #build the weighted concatenation of G0 and G1 = varianceComponent
            if a2 == 0.0:
                logging.info("a2=0.0, only using G0")
                self.G = G0
            elif a2 == 1.0:
                self.G = G1
                logging.info("a2=1.0, only using G1")
            else:
                self.G = SP.concatenate(
                    (SP.sqrt(1.0 - a2) * G0, SP.sqrt(a2) * G1), 1)

        else:
            self.G = None

        if self.G is not None:
            N = self.G.shape[0]
            k = self.G.shape[1]
        else:
            N = K0.shape[0]
            k = N
        if k > 0:
            if ((not self.forcefullrank) and (k < N)):
                #it is faster using the eigen decomposition of G.T*G but this is more accurate
                try:
                    [U, S, V] = LA.svd(self.G, full_matrices=False)
                    if np.any(S < -0.1):
                        logging.warning(
                            "kernel contains a negative Eigenvalue")
                    self.U = U
                    self.S = S * S

                except LA.LinAlgError:  # revert to Eigenvalue decomposition
                    logging.warning(
                        "Got SVD exception, trying eigenvalue decomposition of square of G. Note that this is a little bit less accurate"
                    )
                    [S_, V_] = LA.eigh(self.G.T.dot(self.G))
                    if np.any(S_ < -0.1):
                        logging.warning(
                            "kernel contains a negative Eigenvalue")
                    S_nonz = (S_ > 0)
                    self.S = S_[S_nonz]
                    self.S *= (N / self.S.sum())
                    self.U = self.G.dot(V_[:, S_nonz] / SP.sqrt(self.S))
            else:
                if K0 is None:
                    K0 = self.G0.dot(self.G0.T)
                self.K0 = K0
                if (self.G1 is not None) and (K1 is None):
                    K1 = self.G1.dot(self.G1.T)
                self.setK(K0=K0, K1=K1, a2=a2)
                #K=self.G.dot(self.G.T)
                #self.setK(K)
            self.a2 = a2
        else:  #rank of kernel = 0 (linear regression case)
            self.S = SP.zeros((0))
            self.U = SP.zeros_like(self.G)
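The weighted concatenation in setG() encodes the mixed kernel exactly, since [sqrt(1-a2)*G0, sqrt(a2)*G1] times its own transpose equals (1-a2)*G0*G0^T + a2*G1*G1^T; a quick numeric check:

import numpy as np

N, a2 = 20, 0.3
G0, G1 = np.random.randn(N, 3), np.random.randn(N, 4)
G = np.concatenate((np.sqrt(1 - a2) * G0, np.sqrt(a2) * G1), 1)
K = (1 - a2) * G0.dot(G0.T) + a2 * G1.dot(G1.T)
print(np.allclose(G.dot(G.T), K))  # True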
Example #57
0
def param_dict_to_list(params, skeys=None):
    """convert a param dictionary to a flat array"""
    # concatenate the flattened arrays in the order given by skeys
    RV = SP.concatenate([params[key].flatten() for key in skeys])
    return RV
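A small usage sketch, assuming the snippet's SP alias resolves (old-style import scipy as SP): flatten a parameter dictionary in a fixed key order.

import numpy as np

params = {'covar': np.array([[1.0, 2.0], [3.0, 4.0]]), 'lik': np.array([0.5])}
print(param_dict_to_list(params, skeys=['covar', 'lik']))
# -> [1.  2.  3.  4.  0.5]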
Example #58
0
def compute_contacts(dom, people, dmax):
    """
    This function uses a KDTree method to find the contacts \
    between individuals. Moreover the contacts with the walls \
    are also determined from the wall distance (obtained by the \
    fast-marching method).

    Parameters
    ----------
    dom: Domain
        contains everything for managing the domain
    people: numpy array
        people coordinates and radius : x,y,r
    dmax: float
        threshold value used to consider a contact as \
        active (dij<dmax)

    Returns
    -------
    contacts: numpy array
        all the contacts i,j,dij,eij_x,eij_y such that dij<dmax \
        and i<j (no duplication)
    """
    # lf : the number of points at which the algorithm
    # switches over to brute-force. Has to be positive.
    lf = 100
    if (lf > sys.getrecursionlimit()):
        sys.setrecursionlimit(lf)
    kd = cKDTree(people[:, :2], leafsize=lf)
    ## Find all pairs of points whose distance is at most dmax+2*rmax
    rmax = people[:, 2].max()
    neighbors = kd.query_ball_tree(kd, dmax + 2 * rmax)
    ## Create the contact array : i,j,dij,eij_x,eij_y
    first_elements = sp.arange(people.shape[0])  ## i.e. i
    other_elements = list(map(lambda x: x[1:],
                              neighbors))  ## i.e. all the j values for each i
    lengths = list(map(len, other_elements))
    tt = sp.stack([first_elements, lengths], axis=1)
    I = sp.concatenate(list(map(lambda x: sp.full((x[1], ), x[0]),
                                tt))).astype(int)
    J = sp.concatenate(other_elements).astype(int)
    ind = sp.where(I < J)[0]
    I = I[ind]
    J = J[ind]
    DP = people[J, :2] - people[I, :2]
    Norm = sp.linalg.norm(DP, axis=1, ord=2)
    Dij = Norm - people[I, 2] - people[J, 2]
    ind = sp.where(Dij < dmax)[0]
    Dij = Dij[ind]
    I = I[ind]
    J = J[ind]
    Norm = Norm[ind]
    DP = DP[ind]
    contacts = sp.stack([I, J, Dij, DP[:, 0] / Norm, DP[:, 1] / Norm], axis=1)
    # Add contacts with the walls
    II = sp.floor((people[:, 1] - dom.ymin - 0.5 * dom.pixel_size) /
                  dom.pixel_size).astype(int)
    JJ = sp.floor((people[:, 0] - dom.xmin - 0.5 * dom.pixel_size) /
                  dom.pixel_size).astype(int)
    DD = dom.wall_distance[II, JJ] - people[:, 2]
    ind = sp.where(DD < dmax)[0]
    wall_contacts = sp.stack([
        ind, -1 * sp.ones(ind.shape), DD[ind],
        dom.wall_grad_X[II[ind], JJ[ind]], dom.wall_grad_Y[II[ind], JJ[ind]]
    ],
                             axis=1)
    contacts = sp.vstack([contacts, wall_contacts])
    return sp.array(contacts)
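The person-to-person half of the routine in isolation, with no walls: query a KD-tree for candidate pairs and keep those whose surface-to-surface distance is below dmax. A minimal sketch on three hypothetical people:

import numpy as np
from scipy.spatial import cKDTree

people = np.array([[0.00, 0.0, 0.1],    # columns: x, y, r
                   [0.15, 0.0, 0.1],
                   [2.00, 2.0, 0.1]])
dmax = 0.1
kd = cKDTree(people[:, :2])
pairs = np.array(sorted(kd.query_pairs(dmax + 2 * people[:, 2].max())))
dij = (np.linalg.norm(people[pairs[:, 1], :2] - people[pairs[:, 0], :2], axis=1)
       - people[pairs[:, 0], 2] - people[pairs[:, 1], 2])
print(pairs[dij < dmax], dij[dij < dmax])  # only persons 0 and 1 touch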
Example #59
0
def plot_manhattan(posCum,pv,chromBounds=None,
					thr=None,qv=None,lim=None,xticklabels=True,
					alphaNS=0.1,alphaS=0.5,colorNS='DarkBlue',colorS='Orange',plt=None,thr_plotting=None,labelS=None,labelNS=None):
	"""
	This script makes a manhattan plot
	-------------------------------------------
	posCum			cumulative position
	pv				pvalues
	chromBounds		chrom boundaries (optionally). If not supplied, everything will be plotted into a single chromosome
	qv				qvalues
					if provided, threshold for significance is set on qvalues but pvalues are plotted
	thr				threshold for significance
					default: 0.01 Bonferroni-corrected significance level if qvs are not specified,
					or 0.01 on qvs if qvs are specified
	lim				top limit on y-axis
					if not provided, -1.2*log(pv.min()) is taken
	xticklabels		if true, xtick labels are printed
	alphaNS			transparency of non-significant SNPs
	alphaS			transparency of significant SNPs
	plt				matplotlib.axes.AxesSubplot, the target handle for this figure (otherwise current axes)
	thr_plotting	plot only P-values that are smaller than thr_plotting to speed up plotting
    labelS           optional plotting label (significant loci)
    labelNS          optional plotting label (non-significant loci)
	"""
	if plt is None:
		plt = pl.gca()

	if thr is None:
		thr = 0.01/float(posCum.shape[0])

	if lim is None:
		lim=-1.2*sp.log10(sp.minimum(pv.min(),thr))

	if chromBounds is None:
		chromBounds = sp.array([[0,posCum.max()]])
	else:
		chromBounds = sp.concatenate([chromBounds,sp.array([posCum.max()])])


	n_chroms = chromBounds.shape[0]
	for chrom_i in range(0,n_chroms-1,2):
		pl.fill_between(posCum,0,lim,where=(posCum>chromBounds[chrom_i]) & (posCum<chromBounds[chrom_i+1]),facecolor='LightGray',linewidth=0,alpha=0.5)

	if thr_plotting is not None:
		if pv is not None:
			i_small = pv<thr_plotting
		elif qv is not None:
			i_small = qv<thr_plotting

		if qv is not None:
			qv = qv[i_small]
		if pv is not None:
			pv = pv[i_small]
		if posCum is not None:
			posCum=posCum[i_small]

	if qv is None:
		Isign = pv<thr
	else:
		Isign = qv<thr

	pl.plot(posCum[~Isign],-sp.log10(pv[~Isign]),'.',color=colorNS,ms=5,alpha=alphaNS,label=labelNS)
	pl.plot(posCum[Isign], -sp.log10(pv[Isign]), '.',color=colorS,ms=5,alpha=alphaS,label=labelS)

	if qv is not None:
		pl.plot([0,posCum.max()],[-sp.log10(thr),-sp.log10(thr)],'--',color='Gray')

	pl.ylim(0,lim)

	pl.ylabel('-log$_{10}$pv')
	pl.xlim(0,posCum.max())
	xticks = sp.array([chromBounds[i:i+2].mean() for i in range(chromBounds.shape[0]-1)])
	plt.set_xticks(xticks)
	pl.xticks(fontsize=6)

	if xticklabels:
		plt.set_xticklabels(sp.arange(1,n_chroms+1))
		pl.xlabel('genetic position')
	else:
		plt.set_xticklabels([])

	plt.spines["right"].set_visible(False)
	plt.spines["top"].set_visible(False)
	plt.xaxis.set_ticks_position('bottom')
	plt.yaxis.set_ticks_position('left')
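A hedged usage sketch on synthetic data: uniform p-values on two chromosomes with one injected hit. chromBounds is passed as a 1D array of chromosome start positions, matching how it is concatenated above.

import numpy as np
import pylab as pl

n = 5000
posCum = np.arange(n, dtype=float)
pv = np.random.uniform(size=n)
pv[1234] = 1e-9                                # one significant locus
plot_manhattan(posCum, pv, chromBounds=np.array([0.0, 2500.0]))
pl.show()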
Example #60
0
    def _getScalesPairwise(self, verbose=False, initDiagonal=False):
        """
        Internal function for parameter initialization
        Uses a single trait model for initializing variances and
        a pairwise model to initialize correlations
        """
        var = sp.zeros((self.P, 2))

        if initDiagonal:
            #1. fit single trait model
            if verbose:
                print('.. fit single-trait model for initialization')
            vc = VarianceDecomposition(self.Y[:, 0:1])
            for term_i in range(self.n_randEffs):
                if term_i == self.noisPos:
                    vc.addRandomEffect(is_noise=True)
                else:
                    K = self.vd.getTerm(term_i).getK()
                    vc.addRandomEffect(K=K)
            scales0 = sp.sqrt(0.5) * sp.ones(2)

            for p in range(self.P):
                if verbose: print(('   .. trait %d' % p))
                vc.setY(self.Y[:, p:p + 1])
                conv = vc.optimize(scales0=scales0)
                if not conv:
                    print('warning: initialization did not converge')
                var[p, :] = vc.getVarianceComps()[0, :]

        elif fastlmm_present:
            if verbose:
                print(
                    '.. fit single-trait model for initialization (using fastlmm)'
                )
            for p in range(self.P):
                if verbose: print(('   .. trait %d' % p))
                covariates = None
                for term_i in range(self.n_randEffs):
                    if term_i == self.noisPos:
                        pass
                    else:
                        K = self.vd.getTerm(term_i).getK()
                varY = sp.var(self.Y[:, p:p + 1])
                lmm = fastLMM(X=covariates, Y=self.Y[:, p:p + 1], G=None, K=K)
                opt = lmm.findH2(nGridH2=100)
                h2 = opt['h2']
                var[p, :] = h2 * varY
                var[p, self.noisPos] = (1.0 - h2) * varY
        else:
            if verbose:
                print('.. random initialization of diagonal')
            var = sp.random.randn(var.shape[0], var.shape[1])
            var = var * var + 0.001
        #2. fit pairwise model
        if verbose:
            print('.. fit pairwise model for initialization')
        vc = VarianceDecomposition(self.Y[:, 0:2])
        for term_i in range(self.n_randEffs):
            if term_i == self.noisPos:
                vc.addRandomEffect(is_noise=True, trait_covar_type='freeform')
            else:
                K = self.vd.getTerm(term_i).getK()
                vc.addRandomEffect(K=K, trait_covar_type='freeform')
        rho_g = sp.ones((self.P, self.P))
        rho_n = sp.ones((self.P, self.P))
        for p1 in range(self.P):
            for p2 in range(p1):
                if verbose:
                    print(('   .. fit pair (%d,%d)' % (p1, p2)))
                vc.setY(self.Y[:, [p1, p2]])
                scales0 = sp.sqrt(
                    sp.array([
                        var[p1, 0], 1e-4, var[p2, 0], 1e-4, var[p1, 1], 1e-4,
                        var[p2, 1], 1e-4
                    ]))
                conv = vc.optimize(scales0=scales0)
                if not conv:
                    print('warning: initialization did not converge')
                Cg = vc.getTraitCovar(0)
                Cn = vc.getTraitCovar(1)
                rho_g[p1, p2] = Cg[0, 1] / sp.sqrt(Cg.diagonal().prod())
                rho_n[p1, p2] = Cn[0, 1] / sp.sqrt(Cn.diagonal().prod())
                rho_g[p2, p1] = rho_g[p1, p2]
                rho_n[p2, p1] = rho_n[p1, p2]
        #3. init
        Cg0 = rho_g * sp.dot(sp.sqrt(var[:, 0:1]), sp.sqrt(var[:, 0:1].T))
        Cn0 = rho_n * sp.dot(sp.sqrt(var[:, 1:2]), sp.sqrt(var[:, 1:2].T))
        offset_g = abs(sp.minimum(sp.linalg.eigh(Cg0)[0].min(), 0)) + 1e-4
        offset_n = abs(sp.minimum(sp.linalg.eigh(Cn0)[0].min(), 0)) + 1e-4
        Cg0 += offset_g * sp.eye(self.P)
        Cn0 += offset_n * sp.eye(self.P)
        Lg = sp.linalg.cholesky(Cg0)
        Ln = sp.linalg.cholesky(Cn0)
        Cg_params0 = sp.concatenate([Lg[:, p][:p + 1] for p in range(self.P)])
        Cn_params0 = sp.concatenate([Ln[:, p][:p + 1] for p in range(self.P)])
        scales0 = sp.concatenate(
            [Cg_params0, 1e-2 * sp.ones(1), Cn_params0, 1e-2 * sp.ones(1)])

        return scales0
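The freeform scales above pack each trait covariance as the column-stacked nonzero entries of its Cholesky factor, matching scipy.linalg.cholesky's default upper-triangular convention; a round-trip check of that packing:

import numpy as np
from scipy.linalg import cholesky

P = 3
A = np.random.randn(P, P)
C = A.dot(A.T) + P * np.eye(P)               # positive-definite trait covariance
U = cholesky(C)                              # upper-triangular, C = U.T @ U
params = np.concatenate([U[:, p][:p + 1] for p in range(P)])

U2 = np.zeros((P, P))                        # unpack and rebuild
idx = 0
for p in range(P):
    U2[:p + 1, p] = params[idx:idx + p + 1]
    idx += p + 1
print(np.allclose(U2.T.dot(U2), C))          # True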