Example #1
File: MNEfit.py Project: MarvinT/pyMNE
def MNEfit(stim,resp,order):
    # in order for dlogloss to work, we need to know -<g(yt(n),xt)>data
    # == calculate the constrained averages over the data set
    Nsamples = sp.size(stim,0)
    Ndim = sp.size(stim,1)
    psp = sp.mean(sp.mean(resp)) #spike probability (first constraint)
    avg = (1.0*stim.T*resp)/(Nsamples*1.0)
    avgs = sp.vstack((psp,avg))
    if(order > 1):
        avgsqrd = (stim.T*1.0)*(sp.array(sp.tile(resp,(1,Ndim)))*sp.array(stim))/(Nsamples*1.0)
        avgsqrd = sp.reshape(avgsqrd,(Ndim**2,1))
        avgs = sp.vstack((avgs,avgsqrd))
    
    #initialize params:
    pstart = sp.log(1/avgs[0,0] - 1)
    pstart = sp.hstack((pstart,(.001*(2*sp.random.rand(Ndim)-1))))
    if(order > 1):
        temp = .0005*(2*sp.random.rand(Ndim,Ndim)-1)
        pstart = sp.hstack((pstart,sp.reshape(temp+temp.T,(1,Ndim**2))[0]))
    
    #redefine functions with fixed vals:
    def logLoss(p):
        return LLF.log_loss(p, stim, resp, order)
    def dlogLoss(p):
        return LLF.d_log_loss(p, stim, avgs, order)
    #run the function:
    #pfinal = opt.fmin_tnc(logLoss,pstart,fprime=dlogLoss)
    # conjugate-gradient:
    pfinal = opt.fmin_cg(logLoss,pstart,fprime=dlogLoss)
    #pfinal = opt.fmin(logLoss,pstart,fprime=dlogLoss)
    return pfinal
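A hypothetical call, for orientation: MNEfit expects numpy matrices (so that stim.T*resp is a matrix product) plus the project's LLF module and scipy.optimize as opt; the shapes below are illustrative only.

import scipy as sp
stim = sp.mat(sp.random.rand(500, 16))                # Nsamples x Ndim stimuli
resp = sp.mat((sp.random.rand(500, 1) > 0.9) * 1.0)   # Nsamples x 1 binary spikes
p = MNEfit(stim, resp, order=2)  # [bias, Ndim linear, Ndim**2 quadratic] params

Example #2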
def KDTForest_ErrorCor(galleryL, probesL, ground_truth, K=3, forest_size=6,
                       numForests=5, binary_score=True):
    Np = len(probesL)
    gx = sp.array(galleryL)
    px = sp.array(probesL)
    
    ground_truth = sp.reshape(ground_truth,(Np,K)) if K==1 else ground_truth
    
    forests = []
    print "Building %d separate KDT Forests from input data..."%numForests
    for _idx in range(numForests):
        f = pyf.FLANN()
        _params = f.build_index(gx,algorithm='kdtree', trees=forest_size)
        forests.append(f)
    
    errs = []
    
    print "Testing %d Probe Points across %d KDT Forestsa"%(Np, numForests)
    for f in forests:
        print ".",
        sys.stdout.flush()
        [res, _ds] = f.nn_index(px, K)
        if K==1:
            res = sp.reshape(res,(Np,1))
        err_vec = compute_errors(res, ground_truth, binary_score=binary_score)
        errs.append( sp.reshape(err_vec, (Np,1) ) )
    print ""
      
    ErrM = sp.hstack(errs)
    return ErrM
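A hedged usage sketch: pyflann supplies the FLANN class used above (imported as pyf), and compute_errors is a helper from the same module, so the ground-truth format here is an assumption.

import scipy as sp
gallery = sp.random.rand(1000, 32).astype('float32')
probes = sp.random.rand(50, 32).astype('float32')
gt = sp.zeros((50, 3), dtype=int)  # placeholder ground-truth neighbor ids
ErrM = KDTForest_ErrorCor(gallery, probes, gt, K=3, numForests=2)
print(ErrM.shape)  # (50, 2): one error column per forest

Example #3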
def steepest_descent(A, b, x0, tol=1e-8):
    """
    Uses the steepest descent method to find the x that satisfies Ax = b.

    Inputs:
        A: An m x n NumPy array
        b: An m x 1 NumPy array
        x0: An n x 1 NumPy array that represents the initial guess at a
            solution.
        tol (optional): The tolerance level for convergence. This is compared
                        against the norm(x_n+1 - x_n) each iteration.

    Outputs:
        x: The x that satisfies the equation.
    """
    A = sp.mat(A)
    b = sp.reshape(sp.mat(b),(b.size,1))


    def grad(A, b, x):
        """
        Find the gradient of ||Ax - b||**2
        Inputs:
            A: An m x n NumPy matrix.
            b: An m x 1 NumPy matrix.
            x: An n x 1 NumPy matrix.

        Outputs:
            grad: A NumPy matrix representing the gradient of ||Ax - b||**2
        """
        return np.mat(2 * A.T * (A * x - b))

    def solve_alpha_k(A, b, x):
        """
        Solves for alpha in the steepest descent algorithm
        x_n+1 = x_n - alpha * grad(x_n)

        Inputs:
            A: An m x n NumPy array
            b: An m x 1 NumPy array
            x: The point at which alpha is computed.

        Outputs:
            alpha: The alpha satisfying the algorithm above.
        """

        gradient = grad(A, b, x)
        return np.array(
            (gradient.T * gradient)/(2 * gradient.T * A.T * A * gradient))[0]



    xold = sp.reshape(sp.mat(x0),(x0.size,1))
    xnew = xold - grad(A, b, xold) * solve_alpha_k(A,b,xold)

    while la.norm(xold - xnew) > tol:
        xold = xnew
        xnew = xold - grad(A, b, xold) * solve_alpha_k(A,b,xold)

    return xnew
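A small worked example, assuming numpy as np, scipy as sp, and numpy.linalg as la (the names the function body relies on; older scipy re-exports numpy's mat and reshape at the top level):

import numpy as np
import scipy as sp
from numpy import linalg as la

A = np.array([[3.0, 1.0], [1.0, 2.0]])
b = np.array([9.0, 8.0])
x = steepest_descent(A, b, np.zeros(2))              # converges to [2, 3]
print(np.allclose(A.dot(np.asarray(x).ravel()), b))  # True

Example #4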
    def func(self, X, V):
        k = self.C.TFdata.k
        v1 = self.C.TFdata.v1
        w1 = self.C.TFdata.w1
        
        if k >=0:
            J_coords = self.F.sysfunc.J_coords
            w = sqrt(k)
        
            q = v1 - (1j/w)*matrixmultiply(self.F.sysfunc.J_coords,v1)
            p = w1 + (1j/w)*matrixmultiply(transpose(self.F.sysfunc.J_coords),w1)
            
            p /= linalg.norm(p)
            q /= linalg.norm(q)

            p = reshape(p,(p.shape[0],))
            q = reshape(q,(q.shape[0],))
            
            direc = conjugate(1/matrixmultiply(transpose(conjugate(p)),q))
            p = direc*p

            l1 = firstlyapunov(X, self.F.sysfunc, w, J_coords=J_coords, p=p, q=q)
            
            return array([l1])
        else:
            return array([1])
Example #5
def Au(U,GF,EpsArr,NX,NY,NZ):
    """Returns the result of matrix-vector multiplication
       by the system matrix A=I-GX
    """
    # reshaping input vector into 4-D array
    Uarr=sci.reshape(U,(NX,NY,NZ,3))
    # extended zero-padded arrays
    Uext=sci.zeros((2*NX,2*NY,2*NZ,3),complex)
    Vext=sci.zeros((2*NX,2*NY,2*NZ,3),complex)
    Jext=sci.zeros((2*NX,2*NY,2*NZ,3),complex)
    JFext=sci.zeros((2*NX,2*NY,2*NZ,3),complex)
    Uext[0:NX,0:NY,0:NZ,:]=Uarr
    # contrast current array
    s=0
    while s<=2:
        Jext[0:NX,0:NY,0:NZ,s]=Uext[0:NX,0:NY,0:NZ,s]*(EpsArr[0:NX,0:NY,0:NZ]-1.0)
        JFext[:,:,:,s]=fft.fftn(sci.squeeze(Jext[:,:,:,s]))
        s=s+1
    Vext[:,:,:,0]=Uext[:,:,:,0]-\
    fft.ifftn(sci.squeeze(sci.multiply(GF[:,:,:,0,0],JFext[:,:,:,0])+\
                          sci.multiply(GF[:,:,:,0,1],JFext[:,:,:,1])+\
                          sci.multiply(GF[:,:,:,0,2],JFext[:,:,:,2])))
    Vext[:,:,:,1]=Uext[:,:,:,1]-\
    fft.ifftn(sci.squeeze(sci.multiply(GF[:,:,:,1,0],JFext[:,:,:,0])+\
                          sci.multiply(GF[:,:,:,1,1],JFext[:,:,:,1])+\
                          sci.multiply(GF[:,:,:,1,2],JFext[:,:,:,2])))
    Vext[:,:,:,2]=Uext[:,:,:,2]-\
    fft.ifftn(sci.squeeze(sci.multiply(GF[:,:,:,2,0],JFext[:,:,:,0])+\
                          sci.multiply(GF[:,:,:,2,1],JFext[:,:,:,1])+\
                          sci.multiply(GF[:,:,:,2,2],JFext[:,:,:,2])))
    # reshaping output into column vector
    V=sci.reshape(Vext[0:NX,0:NY,0:NZ,:],(NX*NY*NZ*3,1))

    return V
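Since Au only implements the matrix-vector product A*U = U - G*X(U), it can be wrapped as a scipy LinearOperator and handed to an iterative solver; a hedged sketch, assuming GF, EpsArr, NX, NY, NZ and an incident-field vector Einc exist as above:

from scipy.sparse.linalg import LinearOperator, gmres

N = NX * NY * NZ * 3
A_op = LinearOperator((N, N), dtype=complex,
                      matvec=lambda U: Au(U, GF, EpsArr, NX, NY, NZ))
E, info = gmres(A_op, Einc)  # Einc: incident field flattened to length N; info == 0 on convergence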
Example #6
def condition(point,art_index):

    l = len(point.param['free'])
    a = len(point.param['artificial'])
    
    neq = point.neq
    dx = point.system.dx
    nx = len(point.u) // neq

    y0 = scipy.reshape(point.u,(neq,nx))
    
    left = scipy.zeros((neq,1),scipy.float64)
    right = scipy.zeros((neq,1),scipy.float64)
    left[:,0]=y0[:,0]
    right[:,0]=y0[:,-1]
    u=scipy.c_[left,y0,right]
    
    deriv = 1./(2*dx)*scipy.reshape(scipy.transpose(\
        u[:,2:]-u[:,:-2]),(nx*neq,))
    
    result = {}
    result['column'] = deriv
    result['row'] = deriv*dx
    result['d'] = scipy.zeros((l+a,),scipy.float64)
    result['eq_term'] = deriv*point.lambd[art_index]
    result['res'] = 0

    return result
Example #7
def ideal_data(num, dimU, dimY, dimX, noise=1):
    """Linear system data"""
    # generate randomized linear system matrices
    A = randn(dimX, dimX)
    B = randn(dimX, dimU)
    C = randn(dimY, dimX)
    D = randn(dimY, dimU)

    # make sure state evolution is stable
    U, S, V = svd(A)
    A = dot(U, dot(diag(S / max(S)), V))
    U, S, V = svd(B)
    S2 = zeros((size(U,1), size(V,0)))
    S2[:,:size(U,1)] = diag(S / max(S))
    B = dot(U, dot(S2, V))

    # random input
    U = randn(num, dimU)

    # initial state
    X = reshape(randn(dimX), (1,-1))

    # initial output
    Y = reshape(dot(C, X[-1]) + dot(D, U[0]), (1,-1))

    # generate next state
    X = concatenate((X, reshape(dot(A, X[-1]) + dot(B, U[0]), (1,-1))))

    # and so forth
    for u in U[1:]:
        Y = concatenate((Y, reshape(dot(C, X[-1]) + dot(D, u), (1,-1))))
        X = concatenate((X, reshape(dot(A, X[-1]) + dot(B, u), (1,-1))))

    return U, Y + randn(num, dimY) * noise
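A hypothetical call; the snippet relies on star-imports of randn, svd, dot, diag, zeros, size, reshape and concatenate, and the stabilization of B appears to broadcast only when dimX <= dimU:

from numpy import *
from numpy.random import randn
from numpy.linalg import svd

U, Y = ideal_data(num=200, dimU=3, dimY=2, dimX=3, noise=0.1)
print(U.shape, Y.shape)  # (200, 3) (200, 2)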
Example #8
def draw_cone(event=None):
    # cone radius 1
    Radius1 = 30.0
    # cone radius 2
    Radius2 = 70.0
    # cone height
    Height = 90.0
    # The center point at one of the flat cone faces 
    Point = scipy.array([-25.0, -50.0, 50.0])
    Point = scipy.reshape(Point,(3,1))
    # The direction of the cone from the point given above 
    DirectionFromPoint = scipy.array([25.0, 50.0, 150.0])
    DirectionFromPoint = scipy.reshape(DirectionFromPoint,(3,1))
    # create the cone object
    MyCone = cone_from_point_height_directionvector_and_two_radii( \
                                                            Point, 
                                                            DirectionFromPoint,
                                                            Height,
                                                            Radius1,
                                                            Radius2 )

    MyConeShape = MyCone.Shape()
    ais_shape_MyConeShape = AIS_Shape( MyConeShape ).GetHandle()
    ais_context = display.GetContext().GetObject()
    ais_context.SetMaterial(    ais_shape_MyConeShape,  
                                Graphic3d.Graphic3d_NOM_STONE )
    ais_context.Display( ais_shape_MyConeShape ) 
Example #9
def draw_arrow(event=None):
    # Length of the Arrow
    Arrowlength = 400.0
    # Shaft radius 
    RadiusOfArrowShaft = 20.0
    # Length of the arrow head's cone
    LenghtOfArrowHead = 100.0
    # Radius of the arrow head's cone
    RadiusOfArrowHead = 50.0
    # The center point at one of the flat cone faces 
    Point = scipy.array([-50.0, -50.0, 0.0])
    Point = scipy.reshape(Point,(3,1))
    # The direction of the cone from the point given above 
    DirectionFromPoint = scipy.array([-25.0, -50.0, -150.0])
    DirectionFromPoint = scipy.reshape(DirectionFromPoint,(3,1))
    # create the arrow shape
    # Look at the difference to the other functions and note that it is
    # also possible to create the shape in a function. If we do that we 
    # get a shape and not the object. 
    MyArrowShape = arrowShape(  Point, 
                                DirectionFromPoint,
                                Arrowlength,
                                RadiusOfArrowShaft,
                                LenghtOfArrowHead,
                                RadiusOfArrowHead )

    display.DisplayColoredShape( MyArrowShape , 'BLACK' ) 
Example #10
File: fitfun.py Project: myw/dataiap
def rerun_dfa(chrom,xdata,mask,groups,names,DFs):
    """Run DFA in min app"""
    #extract vars from xdata
    slice = meancent(_slice(xdata,chrom))
    
    #split in to training and test
    tr_slice,cv_slice,ts_slice,tr_grp,cv_grp,ts_grp,tr_nm,cv_nm,ts_nm=_split(slice,groups,mask,names)
    
    #get indexes
    idx = scipy.arange(xdata.shape[0])[:,nA]
    tr_idx = scipy.take(idx,_index(mask,0),0)
    cv_idx = scipy.take(idx,_index(mask,1),0)
    ts_idx = scipy.take(idx,_index(mask,2),0)
    
    #model DFA on training samples
    u,v,eigs,dummy = cva(tr_slice,tr_grp,DFs)
    
    #project xval and test samples
    projUcv = scipy.dot(cv_slice,v)
    projUt = scipy.dot(ts_slice,v)
    
    uout = scipy.zeros((xdata.shape[0],DFs),'d')
    _put(uout,scipy.reshape(tr_idx,(len(tr_idx),)).tolist(),u)
    _put(uout,scipy.reshape(cv_idx,(len(cv_idx),)).tolist(),projUcv)
    _put(uout,scipy.reshape(ts_idx,(len(ts_idx),)).tolist(),projUt)
    
    return uout,v,eigs      
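Example #11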
 def _generate_masked_mesh(self, cell_mask=None):
     r"""
     Generates the mesh based on the cell mask provided
     """
     #
     if cell_mask is None:
         cell_mask = sp.ones(self.data_map.shape, dtype=bool)
     #
     # initializing arrays
     self._edges = sp.ones(0, dtype=str)
     self._merge_patch_pairs = sp.ones(0, dtype=str)
     self._create_blocks(cell_mask)
     #
     # building face arrays
     mapper = sp.ravel(sp.array(cell_mask, dtype=int))
     mapper[mapper == 1] = sp.arange(sp.count_nonzero(mapper))
     mapper = sp.reshape(mapper, (self.nz, self.nx))
     mapper[~cell_mask] = -sp.iinfo(int).max
     #
     boundary_dict = {
         'bottom':
             {'bottom': mapper[0, :][cell_mask[0, :]]},
         'top':
             {'top': mapper[-1, :][cell_mask[-1, :]]},
         'left':
             {'left': mapper[:, 0][cell_mask[:, 0]]},
         'right':
             {'right': mapper[:, -1][cell_mask[:, -1]]},
         'front':
             {'front': mapper[cell_mask]},
         'back':
             {'back': mapper[cell_mask]},
         'internal':
             {'bottom': [], 'top': [], 'left': [], 'right': []}
     }
     #
     # determining cells linked to a masked cell
     cell_mask = sp.where(~sp.ravel(cell_mask))[0]
     inds = sp.in1d(self._field._cell_interfaces, cell_mask)
     inds = sp.reshape(inds, (len(self._field._cell_interfaces), 2))
     inds = inds[:, 0].astype(int) + inds[:, 1].astype(int)
     inds = (inds == 1)
     links = self._field._cell_interfaces[inds]
     #
     # adjusting order so masked cells are all on links[:, 1]
     swap = sp.in1d(links[:, 0], cell_mask)
     links[swap] = links[swap, ::-1]
     #
     # setting side based on index difference
     sides = sp.ndarray(len(links), dtype='<U6')
     sides[sp.where(links[:, 1] == links[:, 0]-self.nx)[0]] = 'bottom'
     sides[sp.where(links[:, 1] == links[:, 0]+self.nx)[0]] = 'top'
     sides[sp.where(links[:, 1] == links[:, 0]-1)[0]] = 'left'
     sides[sp.where(links[:, 1] == links[:, 0]+1)[0]] = 'right'
     #
     # adding each block to the internal face dictionary
     inds = sp.ravel(mapper)[links[:, 0]]
     for side, block_id in zip(sides, inds):
         boundary_dict['internal'][side].append(block_id)
     self.set_boundary_patches(boundary_dict, reset=True)
Example #12
    def get_introns(self):
        
        _introns = sp.reshape(self.exons1.ravel()[1:-1], (self.exons1.shape[0] - 1, 2))
        if len(self.exons2.shape) > 1:
            _introns = sp.r_[_introns, sp.reshape(self.exons2.ravel()[1:-1], (self.exons2.shape[0] - 1, 2))]

        return _introns
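The reshape trick above drops the first exon start and the last exon end, pairing each exon's end with the next exon's start; a standalone sketch with hypothetical coordinates:

import scipy as sp
exons = sp.array([[10, 20], [30, 40], [50, 60]])  # three exons
introns = sp.reshape(exons.ravel()[1:-1], (exons.shape[0] - 1, 2))
print(introns)  # [[20 30] [40 50]] -- the two intervening introns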
Example #13
def crossValidate(y, X, K=None, folds=3, model=None, returnModel=False):
    errors =  SP.empty(folds)
    n = y.shape[0]
    indexes = crossValidationScheme(folds,n)
    predictions = SP.empty(y.shape)
    alpha = []
    alphas = []
    msePath = []
    for cvRun in SP.arange(len(indexes)):
        testIndexes = indexes[cvRun]
        yTrain = y[~testIndexes]
        XTrain = X[~testIndexes]
        if K is None:
            model.fit(XTrain, yTrain)
            prediction = SP.reshape(model.predict(X[testIndexes]), (-1,1))
        else: # models having population structure
            KTrain = K[~testIndexes]
            KTrain = KTrain[:,~testIndexes]
            KTest=K[testIndexes]
            KTest=KTest[:,~testIndexes]
            model.reset()
            model.kernel = KTrain #TODO: make nice integration
            model.fit(XTrain, yTrain)
            prediction = SP.reshape(model.predict(X[testIndexes], k=KTest), (-1,1))
        predictions[testIndexes] = prediction
        errors[cvRun] = predictionError(y[testIndexes], prediction)
        print('prediction error right now is', errors[cvRun])
        if returnModel:
            alpha.append(model.alpha)
            alphas.append(model.alphas)
            msePath.append(model.mse_path)
    if returnModel:
        return indexes, predictions, errors, alpha, alphas, msePath
    else:
        return indexes, predictions, errors
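A hypothetical call on the K=None branch; crossValidationScheme and predictionError are assumed helpers from the same module, y is an n x 1 array (matching the reshaped predictions), and any estimator with fit/predict works:

import scipy as SP
from sklearn.linear_model import Ridge
X = SP.random.randn(30, 5)
y = SP.dot(X, SP.random.randn(5, 1))
indexes, predictions, errors = crossValidate(y, X, folds=3, model=Ridge())
print(errors.mean())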
Example #14
 def quantize0 (image):
     row,col = image.shape
     vect = reshape(image,(row*col,))
     vect = AA(int)((vect-min)*scalingFact + 0.5)
     vect = np.array(vect)
     vect = vect/scalingFact + min
     return reshape(vect,(row,col))
Example #15
    def __init__(self, U, Y, statedim, reg=None):
        if size(shape(U)) == 1:
            U = reshape(U, (-1,1))
        if size(shape(Y)) == 1:
            Y = reshape(Y, (-1,1))
        if reg is None:
            reg = 0

        yDim = size(Y,1)
        uDim = size(U,1)

        self.output_size = size(Y,1) # placeholder

        # number of samples of past/future we'll mash together into a 'state'
        width = 1
        # total number of past/future pairings we get as a result
        K = size(U,0) - 2 * width + 1

        # build hankel matrices containing pasts and futures
        U_p = array([ravel(U[t : t + width]) for t in range(K)]).T
        U_f = array([ravel(U[t + width : t + 2 * width]) for t in range(K)]).T
        Y_p = array([ravel(Y[t : t + width]) for t in range(K)]).T
        Y_f = array([ravel(Y[t + width : t + 2 * width]) for t in range(K)]).T

        # solve the eigenvalue problem
        YfUfT = dot(Y_f, U_f.T)
        YfUpT = dot(Y_f, U_p.T)
        YfYpT = dot(Y_f, Y_p.T)
        UfUpT = dot(U_f, U_p.T)
        UfYpT = dot(U_f, Y_p.T)
        UpYpT = dot(U_p, Y_p.T)
        F = bmat([[None, YfUfT, YfUpT, YfYpT],
                  [YfUfT.T, None, UfUpT, UfYpT],
                  [YfUpT.T, UfUpT.T, None, UpYpT],
                  [YfYpT.T, UfYpT.T, UpYpT.T, None]])
        Ginv = bmat([[pinv(dot(Y_f,Y_f.T)), None, None, None],
                     [None, pinv(dot(U_f,U_f.T)), None, None],
                     [None, None, pinv(dot(U_p,U_p.T)), None],
                     [None, None, None, pinv(dot(Y_p,Y_p.T))]])
        F = F - eye(size(F, 0)) * reg

        # Take smallest eigenvalues
        _, W = eigs(Ginv.dot(F), k=statedim, which='SR')

        # State sequence is a weighted combination of the past
        W_U_p = W[ width * (yDim + uDim) : width * (yDim + uDim + uDim), :]
        W_Y_p = W[ width * (yDim + uDim + uDim):, :]
        X_hist = dot(W_U_p.T, U_p) + dot(W_Y_p.T, Y_p)

        # Regress; trim inputs to match the states we retrieved
        R = concatenate((X_hist[:, :-1], U[width:-width].T), 0)
        L = concatenate((X_hist[:, 1: ], Y[width:-width].T), 0)
        RRi = pinv(dot(R, R.T))
        RL  = dot(R, L.T)
        Sys = dot(RRi, RL).T
        self.A = Sys[:statedim, :statedim]
        self.B = Sys[:statedim, statedim:]
        self.C = Sys[statedim:, :statedim]
        self.D = Sys[statedim:, statedim:]
Example #16
def pca_svd(myarray,type='covar'):
    """Run principal components analysis (PCA) by singular
    value decomposition (SVD)
    
    >>> import scipy
    >>> a = scipy.array([[1,2,3],[0,1,1.5],[-1,-6,34],[8,15,2]])
    >>> a
    array([[  1. ,   2. ,   3. ],
           [  0. ,   1. ,   1.5],
           [ -1. ,  -6. ,  34. ],
           [  8. ,  15. ,   2. ]])
    >>> # There are four samples, with three variables each
    >>> tt,pp,pr,eigs = pca_svd(a)
    >>> tt
    array([[  5.86463567e+00,  -4.28370443e+00,   1.46798845e-01],
           [  6.65979784e+00,  -6.16620433e+00,  -1.25067331e-01],
           [ -2.56257861e+01,   1.82610701e+00,  -6.62877855e-03],
           [  1.31013526e+01,   8.62380175e+00,  -1.51027354e-02]])
    >>> pp
    array([[ 0.15026487,  0.40643255, -0.90123973],
           [ 0.46898935,  0.77318935,  0.4268808 ],
           [ 0.87032721, -0.48681703, -0.07442934]])
    >>> # This is the 'rotation matrix' - you can imagine column labels
    >>> # of PC1, PC2, PC3 and row labels of variable1, variable2, variable3.
    >>> pr
    array([[  0.        ],
           [ 97.1073744 ],
           [ 98.88788958],
           [ 99.98141011]])
    >>> eigs
    array([[ 30.11765617],
           [ 11.57915467],
           [  0.1935556 ]])
    >>> a
    array([[  1. ,   2. ,   3. ],
           [  0. ,   1. ,   1.5],
           [ -1. ,  -6. ,  34. ],
           [  8. ,  15. ,   2. ]])
    """
    if type=='covar':
        myarray = _meancent(myarray)
    elif type=='corr':
        myarray = _autoscale(myarray)
    else:
        raise KeyError("'type' must be one of 'covar' or 'corr'")

    # I think this may run faster if myarray is converted to a matrix first.
    # (This should be tested - anyone got a large dataset?)
    # mymat = scipy.mat(myarray)
    u,s,v = scipy.linalg.svd(myarray)
    tt = scipy.dot(myarray,scipy.transpose(v))
    pp = v
    pr = (1-(s/scipy.sum(scipy.sum(myarray**2))))*100
    pr = scipy.reshape(pr,(1,len(pr)))
    pr = scipy.concatenate((scipy.array([[0.0]]),pr),1)
    pr = scipy.reshape(pr,(pr.shape[1],))
    eigs = s

    return tt,pp,pr[:,nA],eigs[:,nA]
Example #17
 def _forwardImplementation(self, inbuf, outbuf):
     par = reshape(self.params, (3, self.outdim))
     inn = reshape(inbuf, (self.dx, self.dy))
     self.out = numpy.zeros((self.outdim, self.dx, self.dy))
     for k in range(len(outbuf)):
         kernel = ((self.xx - par[0][k]) ** 2 + (self.yy - par[1][k]) ** 2) / (2 * par[2][k] ** 2)
         self.out[k] = numpy.multiply(inn, pybrain.tools.functions.safeExp(-kernel))
         outbuf[k] += numpy.sum(self.out[k])
Example #18
def write_gmm_data_file_depth(
    model_name, mag, dist, depth, result_type, periods, file_out, component_type="AVERAGE_HORIZONTAL"
):
    """
    Create a file of input and output parameters for the Somerville GMM.

    params:
      model_name: The ground motion model, as a string.
      mag: dictionary, key - the mag column name, values, the mag vectors,
           as a list
      dist: dictionary, key - the distance column name, value,
            the distance vectors, as a list.
      depth: depth in km.
      result_type: MEAN or TOTAL_STDDEV
      periods: A list of periods requiring SA values.
               The first value has to be 0.0.

       Mag, distance and periods will be iterated over to give a single SA for
       each combination.
       file_out: The file name and location of the produced data file.
    """
    assert periods[0] == 0.0
    handle = open(file_out, "w", newline="")
    writer = csv.writer(handle, delimiter=",", quoting=csv.QUOTE_NONE)

    # write title
    title = [depth[0], mag[0], dist[0], "result_type", "component_type"] + periods[1:] + ["pga"]
    writer.writerow(title)

    # prepare the coefficients
    model = Ground_motion_specification(model_name)
    coeff = model.calc_coefficient(periods)
    coeff = reshape(coeff, (coeff.shape[0], 1, 1, coeff.shape[1]))
    sigma_coeff = model.calc_sigma_coefficient(periods)
    sigma_coeff = reshape(sigma_coeff, (sigma_coeff.shape[0], 1, 1, sigma_coeff.shape[1]))

    # Iterate
    for depi in depth[1]:
        for magi in mag[1]:
            for disti in dist[1]:
                dist_args = {
                    "mag": array([[[magi]]]),
                    dist[0]: array([[[disti]]]),
                    "depth": array([[[depi]]]),
                    "coefficient": coeff,
                    "sigma_coefficient": sigma_coeff,
                }
                log_mean, log_sigma = model.distribution(**dist_args)
                sa_mod = list(log_mean.reshape(-1))
                sa_mod = [math.exp(x) for x in sa_mod]
                sigma_mod = list(log_sigma.reshape(-1))
                if result_type == "MEAN":
                    row = [depi, magi, disti, result_type, component_type] + sa_mod[1:] + [sa_mod[0]]
                else:
                    row = [depi, magi, disti, result_type, component_type] + sigma_mod[1:] + [sigma_mod[0]]

                writer.writerow(row)
    handle.close()
Example #19
def segmented():
    
    radius = 5 
    sigmaI = 0.02 
    sigmaX = 3.0 
    height = img.shape[0]
    width = img.shape[1]
    flatImg = img.flatten()
    darkImg = flatImg.copy()    # copies: the two images are modified independently below
    brightImg = flatImg.copy()
    
    nodes = img.flatten()
    
    W = spar.lil_matrix((nodes.size, nodes.size),dtype=float)
    D = sp.zeros((1,nodes.size))
    
    for row in range(height):
        for col in range(width):				
            for k in range(row-radius,row+radius):
                for l in range(col-radius,col+radius):
                    try:
                        w = weight(row,col,k,l)
                        W[row*width+col,k*width+l] = w
                        D[0,row*width+col] += w		
                    except:
                        continue
                        
    D = spar.spdiags(D, 0, nodes.size, nodes.size)

    Q = D - W
     
    D1 = D.todense()
    Q1 = Q.todense()
    
    diags = sp.diag(D1)
    DminusHalf = sp.diag(diags**-0.5)
    
    
    segQ = sp.dot(sp.dot(DminusHalf, Q1),DminusHalf)
    vals, vecs = la.eig(segQ)
    
    vecind = sp.argsort(vals)[1]
    theVec = vecs[:, vecind]  # eigenvectors are the columns of vecs

    for i in range(0,height**2):
        if theVec[i] < 0:
            darkImg[i] = 0.0
        else:
            brightImg[i] = 0.0
            
    
    darkImg = sp.reshape(darkImg, (height,height))
    brightImg = sp.reshape(brightImg, (height,height))
             
    
    
    
    return darkImg, flatImg, brightImg
Example #20
 def eig(self):
     noise_evalsinv, noise_evects = linalg.eigh(sp.reshape(self.noise_inv, 
                                         (self.size, self.size)))
     self.noise_evalsinv = al.make_mat(noise_evalsinv, axis_names=('mode',),
                                  row_axes=(0,), col_axes=(0,))
     self.noise_evects = al.make_mat(sp.reshape(noise_evects, 
                                        (self.shape + (self.size,))),
                                axis_names=('freq', 'ra', 'dec', 'mode'),
                                row_axes=(0, 1, 2), col_axes=(3,))
Example #21
 def _provide(self):
     self.stochfun._newSample(self.paramdim*self.batch_size, override=True)
     if self.record_samples:
         ls = self.stochfun._lastseen
         if self.batch_size == 1:
             self._seen.append(ls)
         else:
             for l in reshape(ls, (self.batch_size, self.paramdim)):
                 self._seen.append(reshape(l, (1, self.paramdim)))
Example #22
def shift_back_front(x, phi, neq, dx):
    n = len(x) // neq
    u = scipy.reshape(x, (neq, n))
    u_left = scipy.transpose([u[:, 0]])
    u_right = scipy.transpose([u[:, -1]])
    u_ext = scipy.c_[u_left, u, u_right]
    dudx = 1./(2*dx)*(u_ext[:, 2:]-u_ext[:, :-2])
    #u_ext = scipy.c_[u_left,u_left,u,u_right,u_right]
    #dudx = 1./(12*dx)*(-u_ext[:,4:]+8*u_ext[:,3:-1]-8*u_ext[:,1:-3]+u_ext[:,:-4])
    x_shifted = scipy.reshape(u + phi*dudx, (neq*n,))
    return x_shifted
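A small sketch with hypothetical values, shifting a two-equation profile by phi via the centered difference computed above (older scipy re-exports numpy's arange and reshape, as the snippet assumes):

import scipy
x = scipy.arange(10.)  # neq=2 equations on n=5 grid points, flattened
print(shift_back_front(x, phi=0.1, neq=2, dx=0.5).shape)  # (10,)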
Example #23
def rv_sample(obs = None, tspan = 180, npernight = 3, drun = 10, \
                  nrun = 3, nrand = 10, dnight = 8./24.):
    if obs is not None:
        # Read in RV data
        if obs == 'corot7':
            rv = atpy.Table(corotdefs.ROOTDIR + 'LRa01/cands/corot7_rv.ipac')
            time = rv.JDB
        if obs == 'hd189':
            rv = atpy.Table('/Users/suz/Data/HD189_rv.ipac')
            time = rv.hjd
    else:
        # One point per night
        days = scipy.arange(tspan)
        dt_night = dnight / float(npernight + 1)
        # Multiple points per night, with small deviations from regularity
        obs = scipy.zeros((tspan, npernight))
        for i in scipy.arange(npernight):
            obs[:,i] = days[:] + dt_night * float(i) + \
                pylab.normal(0, dt_night/2., tspan)
        # Select points in "intensive" runs
        if drun == tspan:
            take = scipy.ones((tspan, npernight), 'int')
        else:
            take = scipy.zeros((tspan, npernight), 'int')
            for i in scipy.arange(nrun):
                ok = 0
                while ok == 0:
                    tstart = scipy.fix(scipy.rand(1) * float(tspan))
                    tstart = int(tstart[0])  # integer index for array slicing
                    tend = tstart + drun
                    if tend > tspan: continue
                    if take[tstart:tend, :].any(): continue
                    take[tstart:tend, :] = 1
                    ok = 1


        # Select additional individual points
        ntot = tspan * npernight
        obs = scipy.reshape(obs, ntot)
        take = scipy.reshape(take, ntot)
        index = scipy.argsort(obs)
        obs = obs[index]
        take = take[index]
        for i in scipy.arange(nrand):
            ok = 0
            while ok == 0:
                t = scipy.fix(scipy.rand(1) * float(ntot))
                t = int(t[0])  # integer index for array indexing
                if take[t] == 1: continue
                take[t] = 1
                ok = 1
        time = obs[(take == 1)]
    time -= time[0]
    return time
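A hypothetical call on the synthetic branch (obs=None), which needs only the scipy and pylab names used above:

t = rv_sample(tspan=60, npernight=2, drun=10, nrun=2, nrand=5)
print(len(t), t[0])  # number of selected epochs; first epoch is shifted to 0.0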
Example #24
    def __init__(self, X, Y, reg=None):
        if size(shape(X)) == 1:
            X = reshape(X, (-1, 1))
        if size(shape(Y)) == 1:
            Y = reshape(Y, (-1, 1))
        if reg is None:
            reg = 0

        W1 = pinv(dot(X.T, X) + reg * eye(size(X, 1)))
        W2 = dot(X, W1)
        self.W = dot(Y.T, W2)
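The three lines above implement ridge regression in closed form, W = Y.T @ X @ pinv(X.T @ X + reg*I); a standalone check with hypothetical noise-free data:

import numpy as np
X = np.random.randn(100, 5)
B = np.random.randn(5, 2)
Y = X @ B
W = Y.T @ X @ np.linalg.pinv(X.T @ X)  # reg = 0 recovers B.T exactly
print(np.allclose(W, B.T))  # True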
Example #25
def train(X,K,y,mu,numintervals=100,ldeltamin=-5,ldeltamax=5,rho=1,alpha=1,debug=False):
    """
    train linear mixed model lasso

    Input:
    X: SNP matrix: n_s x n_f
    y: phenotype:  n_s x 1
    K: kinship matrix: n_s x n_s
    mu: l1-penalty parameter
    numintervals: number of intervals for delta linesearch
    ldeltamin: minimal delta value (log-space)
    ldeltamax: maximal delta value (log-space)
    rho: augmented Lagrangian parameter for Lasso solver
    alpha: over-relaxation parameter (typically ranges between 1.0 and 1.8) for Lasso solver

    Output:
    results
    """
    print('train LMM-Lasso')
    print('...l1-penalty: %.2f' % mu)
    
    time_start = time.time()
    [n_s,n_f] = X.shape
    assert X.shape[0]==y.shape[0], 'dimensions do not match'
    assert K.shape[0]==K.shape[1], 'dimensions do not match'
    assert K.shape[0]==X.shape[0], 'dimensions do not match'
    if y.ndim==1:
        y = SP.reshape(y,(n_s,1))

    # train null model
    S,U,ldelta0,monitor_nm = train_nullmodel(y,K,numintervals,ldeltamin,ldeltamax,debug=debug)
    
    # train lasso on residuals
    delta0 = SP.exp(ldelta0)
    Sdi = 1./(S+delta0)
    Sdi_sqrt = SP.sqrt(Sdi)
    SUX = SP.dot(U.T,X)
    SUX = SUX * SP.tile(Sdi_sqrt,(n_f,1)).T
    SUy = SP.dot(U.T,y)
    SUy = SUy * SP.reshape(Sdi_sqrt,(n_s,1))
    
    w,monitor_lasso = train_lasso(SUX,SUy,mu,rho,alpha,debug=debug)

    time_end = time.time()
    time_diff = time_end - time_start
    print('... finished in %.2fs' % time_diff)

    res = {}
    res['ldelta0'] = ldelta0
    res['weights'] = w
    res['time'] = time_diff
    res['monitor_lasso'] = monitor_lasso
    res['monitor_nm'] = monitor_nm
    return res
Example #26
 def test_partial_dot_mat_vect(self):
     self.mat.shape = (4, 6, 5)
     self.mat.rows = (0, 1)
     self.mat.cols = (2,)
     self.mat.axes = ('x', 'y', 'freq')
     new_vect = algebra.partial_dot(self.mat, self.vect)
     self.assertEqual(new_vect.shape, (4, 6, 2, 3))
     self.assertEqual(new_vect.axes, ('x', 'y', 'a', 'b'))
     numerical_result = sp.dot(sp.reshape(self.mat, (4*6, 5)), 
                               sp.reshape(self.vect, (5, 2*3)))
     self.assertTrue(sp.allclose(numerical_result.flatten(),
                                 new_vect.flatten()))
Example #27
File: process.py Project: myw/dataiap
def _slice(x, index, axis=0):
    """for slicing arrays
    """
    if axis == 0:
        slice = scipy.reshape(x[:, int(index[0])], (x.shape[0], 1))
        for n in range(1, len(index), 1):
            slice = scipy.concatenate((slice, scipy.reshape(x[:, int(index[n])], (x.shape[0], 1))), 1)
    elif axis == 1:
        slice = scipy.reshape(x[int(index[0]), :], (1, x.shape[1]))
        for n in range(1, len(index)):
            slice = scipy.concatenate((slice, scipy.reshape(x[int(index[n]), :], (1, x.shape[1]))), 0)
    return slice
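A worked call on the axis=0 branch, with hypothetical data (older scipy re-exports numpy's arange and reshape):

import scipy
x = scipy.arange(12.).reshape(4, 3)
print(_slice(x, index=[0, 2]))  # the (4, 2) array built from columns 0 and 2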
Example #28
    def _update_cache(self):
        """
        Update cache
        """
        cov_params_have_changed = self.Cr.params_have_changed or self.Cn.params_have_changed

        if self.Xr_has_changed:
            start = TIME.time()
            """ Row SVD Bg + Noise """
            Urstar,S,V = NLA.svd(self.Xr)
            self.cache['Srstar'] = SP.concatenate([S**2,SP.zeros(self.N-S.shape[0])])
            self.cache['Lr']     = Urstar.T
            self.mean.setRowRotation(Lr=self.cache['Lr'])

            smartSum(self.time,'cache_XXchanged',TIME.time()-start)
            smartSum(self.count,'cache_XXchanged',1)
        
        if cov_params_have_changed:
            start = TIME.time()
            """ Col SVD Noise """
            S2,U2 = LA.eigh(self.Cn.K()+self.offset*SP.eye(self.P))
            self.cache['Sc2'] = S2
            US2   = SP.dot(U2,SP.diag(SP.sqrt(S2)))
            USi2  = SP.dot(U2,SP.diag(SP.sqrt(1./S2)))
            """ Col SVD region """
            A     = SP.reshape(self.Cr.getParams(),(self.P,self.rank),order='F')
            Astar = SP.dot(USi2.T,A)
            Ucstar,S,V = NLA.svd(Astar)
            self.cache['Scstar'] = SP.concatenate([S**2,SP.zeros(self.P-S.shape[0])])
            self.cache['Lc']     = SP.dot(Ucstar.T,USi2.T)

            """ pheno """
            self.mean.setColRotation(self.cache['Lc'])


        if cov_params_have_changed or self.Xr_has_changed:
            """ S """
            self.cache['s'] = SP.kron(self.cache['Scstar'],self.cache['Srstar'])+1
            self.cache['d'] = 1./self.cache['s']
            self.cache['D'] = SP.reshape(self.cache['d'],(self.N,self.P), order='F')

            """ pheno """
            self.cache['LY']  = self.mean.evaluate()
            self.cache['DLY'] = self.cache['D']*self.cache['LY']

            smartSum(self.time,'cache_colSVDpRot',TIME.time()-start)
            smartSum(self.count,'cache_colSVDpRot',1)

        self.Y_has_changed = False
        self.Xr_has_changed = False
        self.Cr.params_have_changed = False
        self.Cn.params_have_changed = False
Example #29
def test_thermal_conduction():
    # Generate Network and clean up boundaries (delete z-face pores)
    divs = [10, 50]
    Lc = 0.1  # cm
    pn = OpenPNM.Network.Cubic(shape=divs, spacing=Lc)
    pn.add_boundaries()
    pn.trim(pores=pn.pores(['top_boundary', 'bottom_boundary']))
    # Generate Geometry objects for internal and boundary pores
    Ps = pn.pores('internal')
    Ts = pn.throats()
    geom = OpenPNM.Geometry.GenericGeometry(network=pn,
                                            pores=Ps,
                                            throats=Ts)
    geom['pore.area'] = Lc**2
    geom['pore.diameter'] = Lc
    geom['throat.length'] = 1e-25
    geom['throat.area'] = Lc**2
    Ps = pn.pores('boundary')
    boun = OpenPNM.Geometry.GenericGeometry(network=pn, pores=Ps)
    boun['pore.area'] = Lc**2
    boun['pore.diameter'] = 1e-25
    # Create Phase object and associate with a Physics object
    Cu = OpenPNM.Phases.GenericPhase(network=pn)
    Cu['pore.thermal_conductivity'] = 1.0  # W/m.K
    phys = OpenPNM.Physics.GenericPhysics(network=pn,
                                          phase=Cu,
                                          pores=pn.pores(),
                                          throats=pn.throats())
    mod = OpenPNM.Physics.models.thermal_conductance.series_resistors
    phys.add_model(propname='throat.thermal_conductance', model=mod)
    phys.regenerate()  # Update the conductance values
    # Setup Algorithm object
    Fourier_alg = OpenPNM.Algorithms.FourierConduction(network=pn, phase=Cu)
    inlets = pn.pores('back_boundary')
    outlets = pn.pores(['front_boundary', 'left_boundary', 'right_boundary'])
    T_in = 30*sp.sin(sp.pi*pn['pore.coords'][inlets, 1]/5)+50
    Fourier_alg.set_boundary_conditions(bctype='Dirichlet',
                                        bcvalue=T_in,
                                        pores=inlets)
    Fourier_alg.set_boundary_conditions(bctype='Dirichlet',
                                        bcvalue=50,
                                        pores=outlets)
    Fourier_alg.run()
    Fourier_alg.return_results()
    # Calculate analytical solution over the same domain spacing
    Cu['pore.analytical_temp'] = 30*sp.sinh(sp.pi*pn['pore.coords'][:, 0]/5)/sp.sinh(sp.pi/5)*sp.sin(sp.pi*pn['pore.coords'][:, 1]/5) + 50
    b = Cu['pore.analytical_temp'][pn.pores(geom.name)]
    a = Cu['pore.temperature'][pn.pores(geom.name)]
    a = sp.reshape(a, (divs[0], divs[1]))
    b = sp.reshape(b, (divs[0], divs[1]))
    diff = a - b
    assert sp.amax(sp.absolute(diff)) < 0.015
Example #30
 def test_collapse_att_model2(self):
     spawn = 2
     gmm = 3
     site = 1
     events = 1
     periods = 1
     data = array([[100, 10, 1.],[200, 20, 2]])
     data = reshape(data, (spawn, gmm, 1, site, events, periods))
     weights = [1., 2., 3.]
     sum = collapse_att_model(data, weights, True)
     actual = array([[123.],[246.]])
     actual = reshape(actual, (spawn, 1, 1, site, events, periods))
     self.assert_ (allclose(sum, actual))
Example #31
def quiverGD(geod,axstr,slicenum,arrowscale,vbounds=None,time = 0,gkey = None,cmap='jet', fig=None,ax=None,title='',cbar=True,m=None):
    poscoords = ['cartesian','wgs84','enu','ecef']
    assert geod.coordnames.lower() in poscoords

    if geod.coordnames.lower() in ['cartesian','enu','ecef']:
        axdict = {'x':0,'y':1,'z':2}
        veckeys = ['x','y','z']
    elif geod.coordnames.lower() == 'wgs84':
        axdict = {'lat':0,'long':1,'alt':2}# shows which row is this coordinate
        veckeys = ['long','lat','alt']# shows which is the x, y and z axes for plotting
    if isinstance(axstr, str):
        axis=axstr
    else:
        axis= veckeys[axstr]
    veckeys.remove(axis.lower())
    veckeys.append(axis.lower())
    datacoords = geod.dataloc
    xyzvecs = {l:sp.unique(datacoords[:,axdict[l]]) for l in veckeys}
    #make matrices
    M1,M2 = sp.meshgrid(xyzvecs[veckeys[0]],xyzvecs[veckeys[1]])
    slicevec = sp.unique(datacoords[:,axdict[axis]])
    min_idx = sp.argmin(sp.absolute(slicevec-slicenum))
    slicenum=slicevec[min_idx]
    rec_coords = {axdict[veckeys[0]]:M1.flatten(),axdict[veckeys[1]]:M2.flatten(),
                  axdict[axis]:slicenum*sp.ones(M2.size)}


    new_coords = sp.zeros((M1.size,3))
    #make coordinates
    for ckey in rec_coords.keys():
        new_coords[:,ckey] = rec_coords[ckey]
    #determine the data name
    if gkey is None:
        gkey = list(geod.data.keys())[0]

    # get the data location, first check if the data can be just reshaped then do a
    # search

    sliceindx = slicenum==datacoords[:,axdict[axis]]

    datacoordred = datacoords[sliceindx]
    rstypes = ['C','F','A']
    nfounds = True
    M1dlfl = datacoordred[:,axdict[veckeys[0]]]
    M2dlfl = datacoordred[:,axdict[veckeys[1]]]
    for ir in rstypes:
        M1dl = sp.reshape(M1dlfl,M1.shape,order =ir)
        M2dl = sp.reshape(M2dlfl,M1.shape,order =ir)
        if sp.logical_and(sp.allclose(M1dl,M1),sp.allclose(M2dl,M2)):
            nfounds=False
            break
    if nfounds:

        dx = geod.datareducelocation(new_coords,geod.coordnames,gkey[0])[:,time]
        dy = geod.datareducelocation(new_coords,geod.coordnames,gkey[1])[:,time]
        dx = sp.reshape(dx,M1.shape)
        dy = sp.reshape(dy,M1.shape)
    else:
        dx = sp.reshape(geod.data[gkey[0]][sliceindx,time],M1.shape,order=ir)
        dy = sp.reshape(geod.data[gkey[1]][sliceindx,time],M1.shape,order=ir)


    title = insertinfo(title,gkey[0],geod.times[time,0],geod.times[time,1])

    if (ax is None) and (fig is None):
        fig = plt.figure(facecolor='white')
        ax = fig.gca()
    elif ax is None:
        ax = fig.gca()

    if m is None:


        quiv = ax.quiver(M1,M2,dx,dy,scale=arrowscale)

        ax.axis([xyzvecs[veckeys[0]].min(), xyzvecs[veckeys[0]].max(),
                 xyzvecs[veckeys[1]].min(), xyzvecs[veckeys[1]].max()])

        ax.set_title(title)
        ax.set_xlabel(veckeys[0])
        ax.set_ylabel(veckeys[1])
    else:
        N1, N2 = m(M1, M2)
        # plot in the projected map coordinates from the basemap instance
        quiv = ax.quiver(N1, N2, dx, dy, scale=arrowscale)


    return(quiv)
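Example #32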
if ( dimensions == 1 ):
    X         = mgrid[xMin:xMax:ngrid                                  ]
    positions = c_   [X.ravel()                                        ]
    values    = c_   [stateX                                           ]
elif ( dimensions == 2 ):
    X, Y      = mgrid[xMin:xMax:ngrid, yMin:yMax:ngrid                 ]
    positions = c_   [X.ravel()      , Y.ravel()                       ]
    values    = c_   [stateX         , stateY                          ]
else:
    X, Y, Z   = mgrid[xMin:xMax:ngrid, yMin:yMax:ngrid, zMin:zMax:ngrid]
    positions = c_   [X.ravel()      , Y.ravel()      , Z.ravel()      ]
    values    = c_   [stateX         , stateY         , stateZ         ]

# Perform a kernel density estimator on the results.
kernel = stats.kde.gaussian_kde(values.T)
rho    = reshape(kernel(positions.T).T, X.T.shape)

# Output data.
if dimensions == 1:
    D = np.column_stack((X.flat,rho.flat))
elif ( dimensions == 2 ):
    D = np.column_stack((X.flat,Y.flat,rho.flat))
else:
    D = np.column_stack((X.flat,Y.flat,Z.flat,rho.flat))
if options.output:
    f = open(options.output, 'w')
else:
    f = sys.stdout
# For a 3D mesh, we output the header required for reading by the Ifrit package.
if dimensions == 3:
    f.write(str(options.ngrid)+" "+str(options.ngrid)+" "+str(options.ngrid)+"\n")
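Example #33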
# Grid increments
dx = lx / nx
dy = ly / ny

# Initialize vortex
u, v, p = nst.dancing_vortices(nx, ny, dx, dy)

# Simulation time
T_simu = 10000

# Set discrete time step by choosing CFL number (condition: CFL <= 1)
CFL = 1
u_max = sc.amax(sc.absolute(u))
v_max = sc.amax(sc.absolute(v))
t_step = (CFL * dx * dy) / (u_max * dy + v_max * dx)
t_step_vec = sc.reshape(t_step * sc.ones(nx * ny), (nx * ny, 1), order="F")
t_step_vec = sp.csc_matrix(t_step_vec)

# Reshape velocity fields: u_vec, v_vec denote the vector-shaped velocity field
# and U, V denote the velocity values written in a diagonal matrix. The
# diagonal matrix form is needed to calculate the operator for the convective
# term (Transport Operator) in the NST-equation. Both the vector-shaped and
# diagonal-matrix shaped velocities are transformed to sparse matrices
u_vec = sc.reshape(u, (nx * ny, 1), order="F")
u_vec = sp.csc_matrix(u_vec)
U = sc.reshape(u, (nx * ny), order="F")
U = sc.diag(U)
U = sp.csc_matrix(U)

v_vec = sc.reshape(v, (nx * ny, 1), order="F")
v_vec = sp.csc_matrix(v_vec)
Example #34
def mrelnet(x, is_sparse, irs, pcs, y, weights, offset, parm, nobs, nvars, jd,
            vp, cl, ne, nx, nlam, flmin, ulam, thresh, isd, jsd, intr, maxit,
            family):

    # load shared fortran library
    glmlib = loadGlmLib()

    #
    nr = y.shape[1]
    wym = wtmean(y, weights)
    wym = scipy.reshape(wym, (1, wym.size))
    yt2 = (y - scipy.tile(wym, (y.shape[0], 1)))**2
    nulldev = scipy.sum(wtmean(yt2, weights) * scipy.sum(weights))

    if len(offset) == 0:
        offset = y * 0
        is_offset = False
    else:
        if offset.shape != y.shape:
            raise ValueError('Offset must match dimension of y')
        is_offset = True
    #
    y = y - offset
    # now convert types and allocate memory before calling
    # glmnet fortran library
    ######################################
    # --------- PROCESS INPUTS -----------
    ######################################
    # force inputs into fortran order and scipy float64
    copyFlag = False
    x = x.astype(dtype=scipy.float64, order='F', copy=copyFlag)
    irs = irs.astype(dtype=scipy.int32, order='F', copy=copyFlag)
    pcs = pcs.astype(dtype=scipy.int32, order='F', copy=copyFlag)
    y = y.astype(dtype=scipy.float64, order='F', copy=copyFlag)
    weights = weights.astype(dtype=scipy.float64, order='F', copy=copyFlag)
    jd = jd.astype(dtype=scipy.int32, order='F', copy=copyFlag)
    vp = vp.astype(dtype=scipy.float64, order='F', copy=copyFlag)
    cl = cl.astype(dtype=scipy.float64, order='F', copy=copyFlag)
    ulam = ulam.astype(dtype=scipy.float64, order='F', copy=copyFlag)

    ######################################
    # --------- ALLOCATE OUTPUTS ---------
    ######################################
    # lmu
    lmu = -1
    lmu_r = ctypes.c_int(lmu)
    # a0
    a0 = scipy.zeros([nr, nlam], dtype=scipy.float64)
    a0 = a0.astype(dtype=scipy.float64, order='F', copy=False)
    a0_r = a0.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
    # ca
    ca = scipy.zeros([nx, nr, nlam], dtype=scipy.float64)
    ca = ca.astype(dtype=scipy.float64, order='F', copy=False)
    ca_r = ca.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
    # ia
    ia = -1 * scipy.ones([nx], dtype=scipy.int32)
    ia = ia.astype(dtype=scipy.int32, order='F', copy=False)
    ia_r = ia.ctypes.data_as(ctypes.POINTER(ctypes.c_int))
    # nin
    nin = -1 * scipy.ones([nlam], dtype=scipy.int32)
    nin = nin.astype(dtype=scipy.int32, order='F', copy=False)
    nin_r = nin.ctypes.data_as(ctypes.POINTER(ctypes.c_int))
    # rsq
    rsq = -1 * scipy.ones([nlam], dtype=scipy.float64)
    rsq = rsq.astype(dtype=scipy.float64, order='F', copy=False)
    rsq_r = rsq.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
    # alm
    alm = -1 * scipy.ones([nlam], dtype=scipy.float64)
    alm = alm.astype(dtype=scipy.float64, order='F', copy=False)
    alm_r = alm.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
    # nlp
    nlp = -1
    nlp_r = ctypes.c_int(nlp)
    # jerr
    jerr = -1
    jerr_r = ctypes.c_int(jerr)

    #  ###################################
    #   main glmnet fortran caller
    #  ###################################
    if is_sparse:
        # sparse multnet
        glmlib.multspelnet_(
            ctypes.byref(ctypes.c_double(parm)),
            ctypes.byref(ctypes.c_int(nobs)),
            ctypes.byref(ctypes.c_int(nvars)), ctypes.byref(ctypes.c_int(nr)),
            x.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
            pcs.ctypes.data_as(ctypes.POINTER(ctypes.c_int)),
            irs.ctypes.data_as(ctypes.POINTER(ctypes.c_int)),
            y.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
            weights.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
            jd.ctypes.data_as(ctypes.POINTER(ctypes.c_int)),
            vp.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
            cl.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
            ctypes.byref(ctypes.c_int(ne)), ctypes.byref(ctypes.c_int(nx)),
            ctypes.byref(ctypes.c_int(nlam)),
            ctypes.byref(ctypes.c_double(flmin)),
            ulam.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
            ctypes.byref(ctypes.c_double(thresh)),
            ctypes.byref(ctypes.c_int(isd)), ctypes.byref(ctypes.c_int(jsd)),
            ctypes.byref(ctypes.c_int(intr)),
            ctypes.byref(ctypes.c_int(maxit)), ctypes.byref(lmu_r), a0_r,
            ca_r, ia_r, nin_r, rsq_r, alm_r, ctypes.byref(nlp_r),
            ctypes.byref(jerr_r))
    else:
        # call fortran multnet routine
        glmlib.multelnet_(
            ctypes.byref(ctypes.c_double(parm)),
            ctypes.byref(ctypes.c_int(nobs)),
            ctypes.byref(ctypes.c_int(nvars)), ctypes.byref(ctypes.c_int(nr)),
            x.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
            y.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
            weights.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
            jd.ctypes.data_as(ctypes.POINTER(ctypes.c_int)),
            vp.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
            cl.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
            ctypes.byref(ctypes.c_int(ne)), ctypes.byref(ctypes.c_int(nx)),
            ctypes.byref(ctypes.c_int(nlam)),
            ctypes.byref(ctypes.c_double(flmin)),
            ulam.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
            ctypes.byref(ctypes.c_double(thresh)),
            ctypes.byref(ctypes.c_int(isd)), ctypes.byref(ctypes.c_int(jsd)),
            ctypes.byref(ctypes.c_int(intr)),
            ctypes.byref(ctypes.c_int(maxit)), ctypes.byref(lmu_r), a0_r,
            ca_r, ia_r, nin_r, rsq_r, alm_r, ctypes.byref(nlp_r),
            ctypes.byref(jerr_r))

    #  ###################################
    #   post process results
    #  ###################################

    # check for error
    if (jerr_r.value > 0):
        raise ValueError("Fatal glmnet error in library call : error code = ",
                         jerr_r.value)
    elif (jerr_r.value < 0):
        print("Warning: Non-fatal error in glmnet library call: error code = ",
              jerr_r.value)
        print("Check results for accuracy. Partial or no results returned.")

    # clip output to correct sizes
    lmu = lmu_r.value
    a0 = a0[0:nr, 0:lmu]
    ca = ca[0:nx, 0:nr, 0:lmu]
    ia = ia[0:nx]
    nin = nin[0:lmu]
    rsq = rsq[0:lmu]
    alm = alm[0:lmu]

    # ninmax
    ninmax = max(nin)
    # fix first value of alm (from inf to correct value)
    if ulam[0] == 0.0:
        t1 = scipy.log(alm[1])
        t2 = scipy.log(alm[2])
        alm[0] = scipy.exp(2 * t1 - t2)
    # create return fit dictionary
    if nr > 1:
        dfmat = a0.copy()
        dd = scipy.array([nvars, lmu], dtype=scipy.integer)
        beta_list = list()
        if ninmax > 0:
            # TODO: is the reshape here done right?
            ca = scipy.reshape(ca, (nx, nr, lmu))
            ca = ca[0:ninmax, :, :]
            ja = ia[0:ninmax] - 1  # ia is 1-indexed in fortran
            oja = scipy.argsort(ja)
            ja1 = ja[oja]
            df = scipy.any(scipy.absolute(ca) > 0, axis=1)
            df = scipy.sum(df, axis=0)
            df = scipy.reshape(df, (1, df.size))
            for k in range(0, nr):
                ca1 = scipy.reshape(ca[:, k, :], (ninmax, lmu))
                cak = ca1[oja, :]
                dfmat[k, :] = scipy.sum(scipy.absolute(cak) > 0, axis=0)
                beta = scipy.zeros([nvars, lmu], dtype=scipy.float64)
                beta[ja1, :] = cak
                beta_list.append(beta)
        else:
            for k in range(0, nr):
                dfmat[k, :] = scipy.zeros([1, lmu], dtype=scipy.float64)
                beta_list.append(scipy.zeros([nvars, lmu],
                                             dtype=scipy.float64))
            #
            df = scipy.zeros([1, lmu], dtype=scipy.float64)
        #
        fit = dict()
        fit['beta'] = beta_list
        fit['dfmat'] = dfmat
    else:
        dd = scipy.array([nvars, lmu], dtype=scipy.integer)
        if ninmax > 0:
            ca = ca[0:ninmax, :]
            df = scipy.sum(scipy.absolute(ca) > 0, axis=0)
            ja = ia[0:ninmax] - 1
            # ia is 1-indexed in fortran
            oja = scipy.argsort(ja)
            ja1 = ja[oja]
            beta = scipy.zeros([nvars, lmu], dtype=scipy.float64)
            beta[ja1, :] = ca[oja, :]
        else:
            beta = scipy.zeros([nvars, lmu], dtype=scipy.float64)
            df = scipy.zeros([1, lmu], dtype=scipy.float64)
        fit = dict()
        fit['beta'] = beta

    fit['a0'] = a0
    fit['dev'] = rsq
    fit['nulldev'] = nulldev
    fit['df'] = df
    fit['lambdau'] = alm
    fit['npasses'] = nlp_r.value
    fit['jerr'] = jerr_r.value
    fit['dim'] = dd
    fit['offset'] = is_offset
    fit['class'] = 'mrelnet'

    #  ###################################
    #   return to caller
    #  ###################################

    return fit
Example #35
def plotbeamparametersv2(times,
                         configfile,
                         maindir,
                         fitdir='Fitted',
                         params=['Ne'],
                         filetemplate='params',
                         suptitle='Parameter Comparison',
                         werrors=False,
                         nelog=True):
    """ This function will plot the desired parameters for each beam along range.
        The values of the input and measured parameters will be plotted
        Inputs 
            Times - A list of times that will be plotted.
            configfile - The INI file with the simulation parameters that will be useds.
            maindir - The directory the images will be saved in.
            params - List of Parameter names that will be ploted. These need to match
                in the ionocontainer names.
            filetemplate - The first part of a the file names.
            suptitle - The supertitle for the plots.
            werrors - A bools that determines if the errors will be plotted.
    """
    sns.set_style("whitegrid")
    sns.set_context("notebook")
    #    rc('text', usetex=True)
    ffit = os.path.join(maindir, fitdir, 'fitteddata.h5')
    inputfiledir = os.path.join(maindir, 'Origparams')
    (sensdict, simparams) = readconfigfile(configfile)

    paramslower = [ip.lower() for ip in params]
    Nt = len(times)
    Np = len(params)

    #Read in fitted data

    Ionofit = IonoContainer.readh5(ffit)
    dataloc = Ionofit.Sphere_Coords
    pnames = Ionofit.Param_Names
    pnameslower = sp.array([ip.lower() for ip in pnames.flatten()])
    p2fit = [
        sp.argwhere(ip == pnameslower)[0][0] if ip in pnameslower else None
        for ip in paramslower
    ]
    time2fit = [None] * Nt

    for itn, itime in enumerate(times):
        filear = sp.argwhere(Ionofit.Time_Vector >= itime)
        if len(filear) == 0:
            filenum = len(Ionofit.Time_Vector) - 1
        else:
            filenum = filear[0][0]
        time2fit[itn] = filenum
    times_int = [Ionofit.Time_Vector[i] for i in time2fit]

    # determine the beams
    angles = dataloc[:, 1:]
    rng = sp.unique(dataloc[:, 0])
    b = np.ascontiguousarray(angles).view(
        np.dtype((np.void, angles.dtype.itemsize * angles.shape[1])))
    _, idx, invidx = np.unique(b, return_index=True, return_inverse=True)

    beamlist = angles[idx]

    Nb = beamlist.shape[0]

    # Determine which input files are to be used.

    dirlist = glob.glob(os.path.join(inputfiledir, '*.h5'))
    filesonly = [
        os.path.splitext(os.path.split(ifile)[-1])[0] for ifile in dirlist
    ]
    sortlist, outime, outfilelist, timebeg, timelist_s = IonoContainer.gettimes(
        dirlist)
    timelist = sp.array([int(i.split()[0]) for i in filesonly])
    time2file = [None] * Nt

    time2intime = [None] * Nt
    # go through times find files and then times in files
    for itn, itime in enumerate(times):

        filear = sp.argwhere(timelist >= itime)
        if len(filear) == 0:
            filenum = [len(timelist) - 1]
        else:
            filenum = filear[0]

        flist1 = []
        timeinflist = []
        for ifile in filenum:
            filetimes = timelist_s[ifile]
            log1 = (filetimes[:, 0] >= times_int[itn][0]) & (filetimes[:, 0] <
                                                             times_int[itn][1])
            log2 = (filetimes[:, 1] > times_int[itn][0]) & (filetimes[:, 1] <=
                                                            times_int[itn][1])
            log3 = (filetimes[:, 0] <= times_int[itn][0]) & (filetimes[:, 1] >
                                                             times_int[itn][1])
            log4 = (filetimes[:, 0] > times_int[itn][0]) & (filetimes[:, 1] <
                                                            times_int[itn][1])
            curtimes1 = sp.where(log1 | log2 | log3 | log4)[0].tolist()
            flist1 = flist1 + [ifile] * len(curtimes1)
            timeinflist = timeinflist + curtimes1
        time2intime[itn] = timeinflist
        time2file[itn] = flist1
    nfig = int(sp.ceil(Nt * Nb * Np / 9.0))
    imcount = 0
    curfilenum = -1
    # Loop for the figures
    for i_fig in range(nfig):
        lines = [None] * 2
        labels = [None] * 2
        (figmplf, axmat) = plt.subplots(3, 3, figsize=(20, 15), facecolor='w')
        axvec = axmat.flatten()
        # loop that goes through each axis loops through each parameter, beam
        # then time.
        for iax, ax in enumerate(axvec):
            if imcount >= Nt * Nb * Np:
                break
            itime = int(sp.floor(imcount / Nb / Np))
            iparam = int(imcount / Nb - Np * itime)
            ibeam = int(imcount - (itime * Np * Nb + iparam * Nb))
            curbeam = beamlist[ibeam]

            altlist = sp.sin(curbeam[1] * sp.pi / 180.) * rng

            curparm = paramslower[iparam]
            # Use Ne from input to compare the ne derived from the power.
            if curparm == 'nepow':
                curparm_in = 'ne'
            else:
                curparm_in = curparm

            curcoord = sp.zeros(3)
            curcoord[1:] = curbeam

            for iplot, filenum in enumerate(time2file[itime]):

                if curfilenum != filenum:
                    curfilenum = filenum
                    datafilename = dirlist[filenum]
                    Ionoin = IonoContainer.readh5(datafilename)
                    if ('ti' in paramslower) or ('vi' in paramslower):
                        Ionoin = maketi(Ionoin)
                    pnames = Ionoin.Param_Names
                    pnameslowerin = sp.array(
                        [ip.lower() for ip in pnames.flatten()])
                prmloc = sp.argwhere(curparm_in == pnameslowerin)
                if prmloc.size != 0:
                    curprm = prmloc[0][0]
                # build up the parameter vector over the range values by finding the closest point in space in the input
                curdata = sp.zeros(len(rng))
                for irngn, irng in enumerate(rng):
                    curcoord[0] = irng
                    tempin = Ionoin.getclosestsphere(curcoord)[0][
                        time2intime[itime]]
                    Ntloc = tempin.shape[0]
                    tempin = sp.reshape(tempin, (Ntloc, len(pnameslowerin)))
                    curdata[irngn] = tempin[0, curprm]
                #actual plotting of the input data
                lines[0] = ax.plot(curdata,
                                   altlist,
                                   marker='o',
                                   c='b',
                                   linewidth=2)[0]
                labels[0] = 'Input Parameters'
            # Plot fitted data for the axis

            indxkep = np.argwhere(invidx == ibeam)[:, 0]
            curfit = Ionofit.Param_List[indxkep, time2fit[itime],
                                        p2fit[iparam]]
            rng_fit = dataloc[indxkep, 0]
            alt_fit = rng_fit * sp.sin(curbeam[1] * sp.pi / 180.)
            errorexist = 'n' + paramslower[iparam] in pnameslower
            if errorexist and werrors:
                eparam = sp.argwhere('n' +
                                     paramslower[iparam] == pnameslower)[0][0]
                curerror = Ionofit.Param_List[indxkep, time2fit[itime], eparam]
                lines[1] = ax.errorbar(curfit,
                                       alt_fit,
                                       xerr=curerror,
                                       fmt='-.',
                                       c='g',
                                       linewidth=2)[0]
            else:
                lines[1] = ax.plot(curfit,
                                   alt_fit,
                                   marker='o',
                                   c='g',
                                   linewidth=2)[0]
            labels[1] = 'Fitted Parameters'
            # get and plot the input data

            numplots = len(time2file[itime])

            # set the limit for the parameter
            if curparm_in != 'ne':
                ax.set(xlim=[0.75 * sp.amin(curfit), 1.25 * sp.amax(curfit)])
            if curparm == 'vi':
                ax.set(xlim=[
                    -1.25 * sp.amax(sp.absolute(curfit)), 1.25 *
                    sp.amax(sp.absolute(curfit))
                ])
            if (curparm_in == 'ne') and nelog:
                ax.set_xscale('log')

            ax.set_xlabel(params[iparam])
            ax.set_ylabel('Alt km')
            ax.set_title(
                '{0} vs Altitude, Time: {1}s Az: {2}$^o$ El: {3}$^o$'.format(
                    params[iparam], times[itime], *curbeam))
            imcount = imcount + 1
        # save figure
        figmplf.suptitle(suptitle, fontsize=20)
        if None in labels:
            labels.remove(None)
            lines.remove(None)
        plt.figlegend(lines,
                      labels,
                      loc='lower center',
                      ncol=5,
                      labelspacing=0.)
        fname = filetemplate + '_{0:0>3}.png'.format(i_fig)
        plt.savefig(fname)
        plt.close(figmplf)
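The four overlap masks above (log1-log4) enumerate the ways a file's time interval can intersect the requested window; boundary conventions aside, their union collapses to the standard two-comparison overlap test. A minimal sketch of that equivalence (numpy stands in for the sp alias used above; `overlaps` is a hypothetical helper, not part of the original code):

import numpy as np

def overlaps(intervals, t0, t1):
    # Two intervals intersect exactly when each one starts before the
    # other ends; this single test subsumes log1 | log2 | log3 | log4.
    starts, ends = intervals[:, 0], intervals[:, 1]
    return np.where((starts < t1) & (ends > t0))[0]

ivals = np.array([[0., 10.], [10., 20.], [20., 30.]])
print(overlaps(ivals, 5., 15.))  # -> [0 1]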
示例#36
0
import matplotlib.pyplot as plt
import pandas as pd
import scipy as sp
from astropy import stats
import seaborn as sns

from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor
from sklearn import cross_validation
from sklearn.neighbors import KNeighborsRegressor, RadiusNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.gaussian_process import GaussianProcessRegressor


sfile = '/data2/mrs493/my_data2.csv'

df = pd.read_csv(sfile, sep=',')

colour = sp.reshape(df.colour, (-1, 1))
#reshape the colour to a column vector for use in the algorithm

designation = sp.array(df.designation.tolist())

temp = sp.array(df.teff.tolist())

"""
possibly remove SVC, takes long time (~4 mins per fold)
"""

folds = 2

names = ['KNeighbours', 'Radius Neighbours', 'Random Forest Regressor',
             'Linear Regression', 'Gaussian Process Regressor', 'Ada Boost Classifier']
classifiers = [KNeighborsRegressor(), RadiusNeighborsRegressor(), RandomForestRegressor(),
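The listing is cut off at this point. Below is a minimal sketch of the presumable cross-validated comparison loop, assuming the modern scikit-learn API (model_selection superseded the cross_validation module in 0.18) and using random stand-ins for the colour/teff arrays loaded above:

import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression

X = np.random.rand(100, 1)   # stand-in for the reshaped colour column
y = np.random.rand(100)      # stand-in for the teff array

for name, reg in [('KNeighbours', KNeighborsRegressor()),
                  ('Random Forest Regressor', RandomForestRegressor(n_estimators=10)),
                  ('Linear Regression', LinearRegression())]:
    scores = cross_val_score(reg, X, y, cv=2)   # 2 folds, as above
    print('%s: mean score = %.3f' % (name, scores.mean()))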
示例#37
0
def cvmrelnet(fit,
              lambdau,
              x,
              y,
              weights,
              offset,
              foldid,
              ptype,
              grouped,
              keep=False):
    
    typenames = {'deviance':'Mean-Squared Error', 'mse':'Mean-Squared Error', 
                 'mae':'Mean Absolute Error'}
    if ptype == 'default':
        ptype = 'mse'

    ptypeList = ['mse', 'mae', 'deviance']    
    if not ptype in ptypeList:
        print('Warning: only ', ptypeList, "available for Gaussian models; 'mse' used")
        ptype = 'mse'

    nobs, nc = y.shape
    
    if len(offset) > 0:
        y = y - offset

    predmat = scipy.ones([nobs, nc, lambdau.size])*scipy.NAN               
    nfolds = scipy.amax(foldid) + 1
    nlams = [] 
    for i in range(nfolds):
        which = foldid == i
        fitobj = fit[i].copy()
        fitobj['offset'] = False
        preds = glmnetPredict(fitobj, x[which, ])
        nlami = scipy.size(fit[i]['lambdau'])
        predmat[which, :, 0:nlami] = preds
        nlams.append(nlami)
    # convert nlams to scipy array
    nlams = scipy.array(nlams, dtype=scipy.integer)

    N = nobs - scipy.reshape(scipy.sum(scipy.isnan(predmat[:, 1, :]), axis=0), (1, -1))
    bigY = scipy.tile(y[:, :, None], [1, 1, lambdau.size])

    if ptype == 'mse':
        cvraw = scipy.sum((bigY - predmat)**2, axis=1).squeeze()
    elif ptype == 'mae':
        cvraw = scipy.sum(scipy.absolute(bigY - predmat), axis=1).squeeze()
        
    if y.size/nfolds < 3 and grouped == True:
        print('Option grouped=false enforced in cv.glmnet, since < 3 observations per fold')
        grouped = False
        
    if grouped == True:
        cvob = cvcompute(cvraw, weights, foldid, nlams)
        cvraw = cvob['cvraw']
        weights = cvob['weights']
        N = cvob['N']
        
    cvm = wtmean(cvraw, weights)
    sqccv = (cvraw - cvm)**2
    cvsd = scipy.sqrt(wtmean(sqccv, weights)/(N-1))

    result = dict()
    result['cvm'] = cvm
    result['cvsd'] = cvsd
    result['name'] = typenames[ptype]

    if keep:
        result['fit_preval'] = predmat
        
    return result
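For reference, the cvm/cvsd lines at the end are a weighted mean and a weighted standard error of the per-observation errors. A minimal numpy sketch (`wtmean_sketch` is a hypothetical stand-in for the wtmean helper this module imports elsewhere):

import numpy as np

def wtmean_sketch(mat, weights):
    # weighted mean over observations (axis 0)
    w = weights.reshape(-1, 1)
    return np.sum(mat * w, axis=0) / np.sum(w)

cvraw = np.array([[1., 2.], [3., 4.]])   # error per observation x lambda
weights = np.array([1., 3.])
N = cvraw.shape[0]

cvm = wtmean_sketch(cvraw, weights)
cvsd = np.sqrt(wtmean_sketch((cvraw - cvm) ** 2, weights) / (N - 1))
print(cvm, cvsd)   # [2.5 3.5] [0.866... 0.866...]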
示例#38
0
def gcbias_lite(coveragefile,
                bedfilename,
                reference,
                fileout,
                graphtitle=None,
                executiongranted=None,
                status=None,
                bedTools=False):
    """************************************************************************************************************************************************************
	Task: draws coverage as a function of gc content. IMPROVED VERSION of gcbias that avoids the use of bedtools (pybedtools)
	Input:
		coveragefile: string containing the full path of the bam.coverage file to analyze. This file has been built according to 1-base format
		bedfilename: target file -> assumes original-standard bed file
		reference: fasta file with reference genome
		fileout: string containing the full path of the png file where the resulting figure will be saved.
		bedTools: whether pybedtools is used instead of our own method
	Output: a png file will be created named "fileout" where a graph that compares gc content and mean coverage will be saved.	
	************************************************************************************************************************************************************"""

    if (executiongranted != None):
        executiongranted.acquire()

    pid = str(os.getpid())

    #	print 'Processing '+coveragefile
    #	print 'Results will be written at '+fileout
    coverage = region_coverage(
        coveragefile)  # Calculate mean coverage per region

    ##	fdw=file('regionCoverage.txt','w')
    ##	for element in sorted(coverage.keys()):
    ##		fdw.write(str(element)+'\n')
    ##	fdw.close()

    if (len(coverage) > 1):

        if not bedTools:  # Own method
            #			print 'Own method'
            chromosomes = {}
            allKeys = coverage.keys()

            for currentKey in allKeys:
                chromosomes[currentKey[
                    0]] = 1  # Stores all chromosomes to be examined (the ones contained in the target file)

            # Load BED file -> since coverage information is in 1-base format, BED format must be transformed to 1-base
            bed = bed_file.bed_file(bedfilename)
            sortedBed = bed.my_sort_bed()  # Sort bed avoiding bedtools
            nonOverlappingBed = sortedBed.non_overlapping_exons(
                1
            )  # Base 1!!! # This generates a BED file in base 1 (Non-standard BED)
            finalBed = nonOverlappingBed.my_sort_bed(
            )  # BED file in base 1 (Non-standard BED)
            finalBed.load_custom(
                -1
            )  # Load chromosome and positions in base 1....(finalBed is in base 1 -> Non-standard BED)

            #Load FASTA file
            fastaFile = file(reference, 'r')

            storeSequence = False
            wholeChromosome = ''
            currentChromosome = ''
            gccontent = {}

            for line in fastaFile:  # Read each line of the fasta file
                if line.startswith(
                        '>'
                ):  # New chromosome starts -> reading a new line until another '>' is found
                    #					print 'Processing ' +line+'\n'
                    if storeSequence:  # a chromosome has been read run gc bias
                        currentGCcontent = measureGCbias(
                            wholeChromosome, currentChromosome, finalBed)
                        gccontent.update(currentGCcontent)  # Update dictionary
                        storeSequence = False
                    currentChromosome = re.split(
                        ' +', line
                    )[0]  # Format: >1 dna:chromosome chromosome:GRCh37:1:1:249250621:1
                    currentChromosome = currentChromosome.split(
                        '>')[1].strip()  # Chromosome string
                    if (
                            currentChromosome in chromosomes
                    ):  # If current chromosome read in the FASTA file is in the list of chromosomes in the BED file
                        storeSequence = True
                    wholeChromosome = ''  # To store whole sequence for the current chromosome
                elif (not re.search('>', line) and storeSequence):
                    wholeChromosome = wholeChromosome + line.rstrip(
                    )  # Remove '\n' from current line and concatenates to wholeChromosome

            if (storeSequence):  # For the last chromosome
                currentGCcontent = measureGCbias(wholeChromosome,
                                                 currentChromosome, finalBed)
                gccontent.update(currentGCcontent)  # Update dictionary

            fastaFile.close()
            region_ids = []
            region_ids = coverage.keys()

            if (len(gccontent) == 0):
                print 'ERROR: G+C content values cannot be calculated. Probably the provided reference file ' + reference + ' does not match'
                print '	the target file ' + bedfilename + '. That is, sequences of regions in the target file are probably not included within the'
                print '	reference file.'
                sys.exit(1)

        else:
            print 'Calculating nt content by means of pybedtools...'
            bed = bed_file.bed_file(bedfilename)
            sortedBed = bed.my_sort_bed()  # Sort bed avoiding bedtools
            nonOverlappingBed = sortedBed.non_overlapping_exons(
                1)  # base one!!!
            finalBed = nonOverlappingBed.my_sort_bed()  # BED file in base 1
            bedfd = pybedtools.BedTool(finalBed.filename)
            bedfd = bedfd.remove_invalid(
            )  # Remove negative coordinates or features with length=0, which do not work with bedtools
            pybedtools._bedtools_installed = True
            pybedtools.set_bedtools_path(BEDTOOLSPATH)
            ntcontent = bedfd.nucleotide_content(reference)

            # Each entry in ntcontent is parsed to extract the gc content of each exon
            gccontent = {}
            for entry in ntcontent:
                gccontent[(entry.fields[0], string.atoi(entry.fields[1]),
                           string.atoi(entry.fields[2]))] = string.atof(
                               entry.fields[-8]) * 100
            print '	Done.'
            # gccontent keys in dictionary: chromosome, exon init, exon end

            region_ids = []
            for currentKey in coverage.keys(
            ):  # Pybedtools does not work with regions with zero length -> remove them (there are a few of them)
                if currentKey[1] != currentKey[2]:
                    region_ids.append(currentKey)

##
##		fdw=file('gcContent.txt','w')
##		for element in sorted(gccontent.keys()):
##			fdw.write(str(element)+'\n')
##		fdw.close()
##
#region_ids = gccontent.keys()
        coveragearray = numpy.array([coverage[id] for id in region_ids])
        gccontentarray = numpy.array([gccontent[id]
                                      for id in region_ids])  # Values in [0,1]

        #		fig = pyplot.figure(figsize=(6,6))
        #		ax = fig.add_subplot(111)
        #
        #		ax.hist(gccontentarray,bins=100)
        #		fig.suptitle('Distribution of GC content regardless of coverage value')
        #		ax.set_ylabel('Frequency')
        #		ax.set_xlabel('GC content')
        #		ax.set_xlim(0, 100)
        #		fig.savefig('distribution.png')

        xmin = gccontentarray.min()
        xmax = gccontentarray.max(
        )  # Due to the imshow sentence, we need to rescale gccontent from [0,1] to [0,100]
        ymin = coveragearray.min()
        ymax = coveragearray.max()

        # Perform a kernel density estimator on the results
        X, Y = mgrid[xmin:xmax:100j, ymin:ymax:100j]
        positions = c_[X.ravel(), Y.ravel()]
        values = c_[gccontentarray, coveragearray]
        kernel = stats.kde.gaussian_kde(values.T)
        Z = reshape(kernel(positions.T).T, X.T.shape)

        fig = pyplot.figure(figsize=(6, 6))
        ax = fig.add_subplot(111)
        sc = ax.imshow(
            rot90(Z),
            cmap=cm.gist_earth_r,
            extent=[xmin, 100, ymin, ymax],
            aspect="auto"
        )  # Due to the imshow sentence, we need to rescale gccontent from [0,1] to [0,100]
        cbar = fig.colorbar(sc, ticks=[numpy.min(Z), numpy.max(Z)])
        cbar.ax.set_yticklabels(['Low', 'High'])
        cbar.set_label('Density')
        ax.set_xlabel('GC content (%)')
        ax.set_ylabel('Mean coverage')

        if (graphtitle is None):
            graphtitle = ''
        if (len(graphtitle) > 25):
            ax.set_title(graphtitle[:25] + '...')
        else:
            ax.set_title(graphtitle)

        fig.savefig(fileout)
        matplotlib.pyplot.close(fig)

        if (status != None):
            meanvalue = gccontentarray.mean()
            status.value = (meanvalue >= 45 and meanvalue <= 55)

    else:
        print 'WARNING: only one region found in the bed file. Skipping GC bias calculation.'

    if (executiongranted != None):
        executiongranted.release()
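The plotting core of the function above, isolated as a self-contained sketch with synthetic stand-ins for the GC-content and coverage arrays: a Gaussian KDE over the (GC, coverage) pairs is evaluated on a 100x100 grid and rendered with imshow, as in the original.

import numpy as np
from scipy import stats
import matplotlib
matplotlib.use('Agg')          # headless backend
from matplotlib import pyplot

gc = np.random.uniform(30, 70, 500)               # GC content (%)
cov = 50 + 0.5 * gc + 5 * np.random.randn(500)    # mean coverage

X, Y = np.mgrid[gc.min():gc.max():100j, cov.min():cov.max():100j]
positions = np.c_[X.ravel(), Y.ravel()]
kernel = stats.gaussian_kde(np.c_[gc, cov].T)
Z = np.reshape(kernel(positions.T), X.shape)

fig = pyplot.figure(figsize=(6, 6))
ax = fig.add_subplot(111)
sc = ax.imshow(np.rot90(Z), cmap='gist_earth_r',
               extent=[gc.min(), gc.max(), cov.min(), cov.max()],
               aspect='auto')
cbar = fig.colorbar(sc, ticks=[Z.min(), Z.max()])
cbar.ax.set_yticklabels(['Low', 'High'])
ax.set_xlabel('GC content (%)')
ax.set_ylabel('Mean coverage')
fig.savefig('gc_density_sketch.png')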
示例#39
0
def bovy_dens2d(X, **kwargs):
    """
    NAME:

       bovy_dens2d

    PURPOSE:

       plot a 2d density with optional contours

    INPUT:

       first argument is the density

       matplotlib.pyplot.imshow keywords (see http://matplotlib.sourceforge.net/api/axes_api.html#matplotlib.axes.Axes.imshow)

       xlabel - (raw string!) x-axis label, LaTeX math mode, no $s needed

       ylabel - (raw string!) y-axis label, LaTeX math mode, no $s needed

       xrange

       yrange

       noaxes - don't plot any axes

       overplot - if True, overplot

       colorbar - if True, add colorbar

       shrink= colorbar argument: shrink the colorbar by the factor (optional)

       Contours:
       
       contours - if True, draw contours (10 by default)

       levels - contour-levels

       cntrmass - if True, the density is a probability and the levels 
                  are probability masses contained within the contour

       cntrcolors - colors for contours (single color or array)

       cntrlabel - label the contours

       cntrlw, cntrls - linewidths and linestyles for contour

       cntrlabelsize, cntrlabelcolors,cntrinline - contour arguments

       onedhists - if True, make one-d histograms on the sides

       onedhistcolor - histogram color

       retAxes= return all Axes instances

    OUTPUT:

    HISTORY:

       2010-03-09 - Written - Bovy (NYU)

    """
    if kwargs.has_key('overplot'):
        overplot = kwargs['overplot']
        kwargs.pop('overplot')
    else:
        overplot = False
    if not overplot:
        pyplot.figure()
    if kwargs.has_key('xlabel'):
        xlabel = kwargs['xlabel']
        kwargs.pop('xlabel')
    else:
        xlabel = None
    if kwargs.has_key('ylabel'):
        ylabel = kwargs['ylabel']
        kwargs.pop('ylabel')
    else:
        ylabel = None
    if kwargs.has_key('zlabel'):
        zlabel = kwargs['zlabel']
        kwargs.pop('zlabel')
    else:
        zlabel = None
    if kwargs.has_key('extent'):
        extent = kwargs['extent']
        kwargs.pop('extent')
    else:
        if kwargs.has_key('xrange'):
            xlimits = list(kwargs['xrange'])
            kwargs.pop('xrange')
        else:
            xlimits = [0, X.shape[0]]
        if kwargs.has_key('yrange'):
            ylimits = list(kwargs['yrange'])
            kwargs.pop('yrange')
        else:
            ylimits = [0, X.shape[1]]
        extent = xlimits + ylimits
    if not kwargs.has_key('aspect'):
        kwargs['aspect'] = (xlimits[1] - xlimits[0]) / float(ylimits[1] -
                                                             ylimits[0])
    if kwargs.has_key('noaxes'):
        noaxes = kwargs['noaxes']
        kwargs.pop('noaxes')
    else:
        noaxes = False
    if (kwargs.has_key('contours') and kwargs['contours']) or \
            kwargs.has_key('levels') or \
            (kwargs.has_key('cntrmass') and kwargs['cntrmass']):
        contours = True
    else:
        contours = False
    if kwargs.has_key('contours'): kwargs.pop('contours')
    if kwargs.has_key('levels'):
        levels = kwargs['levels']
        kwargs.pop('levels')
    elif contours:
        if kwargs.has_key('cntrmass') and kwargs['cntrmass']:
            levels = sc.linspace(0., 1., _DEFAULTNCNTR)
        elif True in sc.isnan(sc.array(X)):
            levels = sc.linspace(sc.nanmin(X), sc.nanmax(X), _DEFAULTNCNTR)
        else:
            levels = sc.linspace(sc.amin(X), sc.amax(X), _DEFAULTNCNTR)
    if kwargs.has_key('cntrmass') and kwargs['cntrmass']:
        cntrmass = True
        kwargs.pop('cntrmass')
    else:
        cntrmass = False
        if kwargs.has_key('cntrmass'): kwargs.pop('cntrmass')
    if kwargs.has_key('cntrcolors'):
        cntrcolors = kwargs['cntrcolors']
        kwargs.pop('cntrcolors')
    elif contours:
        cntrcolors = 'k'
    if kwargs.has_key('cntrlabel') and kwargs['cntrlabel']:
        cntrlabel = True
        kwargs.pop('cntrlabel')
    else:
        cntrlabel = False
        if kwargs.has_key('cntrlabel'): kwargs.pop('cntrlabel')
    if kwargs.has_key('cntrlw'):
        cntrlw = kwargs['cntrlw']
        kwargs.pop('cntrlw')
    elif contours:
        cntrlw = None
    if kwargs.has_key('cntrls'):
        cntrls = kwargs['cntrls']
        kwargs.pop('cntrls')
    elif contours:
        cntrls = None
    if kwargs.has_key('cntrlabelsize'):
        cntrlabelsize = kwargs['cntrlabelsize']
        kwargs.pop('cntrlabelsize')
    elif contours:
        cntrlabelsize = None
    if kwargs.has_key('cntrlabelcolors'):
        cntrlabelcolors = kwargs['cntrlabelcolors']
        kwargs.pop('cntrlabelcolors')
    elif contours:
        cntrlabelcolors = None
    if kwargs.has_key('cntrinline'):
        cntrinline = kwargs['cntrinline']
        kwargs.pop('cntrinline')
    elif contours:
        cntrinline = None
    if kwargs.has_key('retCumImage'):
        retCumImage = kwargs['retCumImage']
        kwargs.pop('retCumImage')
    else:
        retCumImage = False
    if kwargs.has_key('colorbar'):
        cb = kwargs['colorbar']
        kwargs.pop('colorbar')
    else:
        cb = False
    if kwargs.has_key('shrink'):
        shrink = kwargs['shrink']
        kwargs.pop('shrink')
    else:
        shrink = None
    if kwargs.has_key('onedhists'):
        onedhists = kwargs['onedhists']
        kwargs.pop('onedhists')
    else:
        onedhists = False
    if kwargs.has_key('onedhistcolor'):
        onedhistcolor = kwargs['onedhistcolor']
        kwargs.pop('onedhistcolor')
    else:
        onedhistcolor = 'k'
    if kwargs.has_key('retAxes'):
        retAxes = kwargs['retAxes']
        kwargs.pop('retAxes')
    else:
        retAxes = False
    if onedhists:
        if overplot: fig = pyplot.gcf()
        else: fig = pyplot.figure()
        nullfmt = NullFormatter()  # no labels
        # definitions for the axes
        left, width = 0.1, 0.65
        bottom, height = 0.1, 0.65
        bottom_h = left_h = left + width
        rect_scatter = [left, bottom, width, height]
        rect_histx = [left, bottom_h, width, 0.2]
        rect_histy = [left_h, bottom, 0.2, height]
        axScatter = pyplot.axes(rect_scatter)
        axHistx = pyplot.axes(rect_histx)
        axHisty = pyplot.axes(rect_histy)
        # no labels
        axHistx.xaxis.set_major_formatter(nullfmt)
        axHistx.yaxis.set_major_formatter(nullfmt)
        axHisty.xaxis.set_major_formatter(nullfmt)
        axHisty.yaxis.set_major_formatter(nullfmt)
        fig.sca(axScatter)
    ax = pyplot.gca()
    ax.set_autoscale_on(False)
    out = pyplot.imshow(X, extent=extent, **kwargs)
    pyplot.axis(extent)
    _add_axislabels(xlabel, ylabel)
    _add_ticks()
    #Add colorbar
    if cb:
        if shrink is None:
            if kwargs.has_key('aspect'):
                shrink = sc.amin([float(kwargs['aspect']) * 0.87, 1.])
            else:
                shrink = 0.87
        CB1 = pyplot.colorbar(out, shrink=shrink)
        if not zlabel is None:
            if zlabel[0] != '$':
                thiszlabel = r'$' + zlabel + '$'
            else:
                thiszlabel = zlabel
            CB1.set_label(zlabel)
    if contours or retCumImage:
        if kwargs.has_key('aspect'):
            aspect = kwargs['aspect']
        else:
            aspect = None
        if kwargs.has_key('origin'):
            origin = kwargs['origin']
        else:
            origin = None
        if cntrmass:
            #Sum from the top down!
            X[sc.isnan(X)] = 0.
            sortindx = sc.argsort(X.flatten())[::-1]
            cumul = sc.cumsum(sc.sort(X.flatten())[::-1]) / sc.sum(X.flatten())
            cntrThis = sc.zeros(sc.prod(X.shape))
            cntrThis[sortindx] = cumul
            cntrThis = sc.reshape(cntrThis, X.shape)
        else:
            cntrThis = X
        if contours:
            cont = pyplot.contour(cntrThis,
                                  levels,
                                  colors=cntrcolors,
                                  linewidths=cntrlw,
                                  extent=extent,
                                  aspect=aspect,
                                  linestyles=cntrls,
                                  origin=origin)
            if cntrlabel:
                pyplot.clabel(cont,
                              fontsize=cntrlabelsize,
                              colors=cntrlabelcolors,
                              inline=cntrinline)
    if noaxes:
        ax.set_axis_off()
    #Add onedhists
    if not onedhists:
        if retCumImage:
            return cntrThis
        elif retAxes:
            return pyplot.gca()
        else:
            return out
    histx = sc.nansum(X.T, axis=1) * m.fabs(ylimits[1] - ylimits[0]) / X.shape[
        1]  #nansum bc nan is *no dens value*
    histy = sc.nansum(X.T,
                      axis=0) * m.fabs(xlimits[1] - xlimits[0]) / X.shape[0]
    histx[sc.isnan(histx)] = 0.
    histy[sc.isnan(histy)] = 0.
    dx = (extent[1] - extent[0]) / float(len(histx))
    axHistx.plot(sc.linspace(extent[0] + dx, extent[1] - dx, len(histx)),
                 histx,
                 drawstyle='steps-mid',
                 color=onedhistcolor)
    dy = (extent[3] - extent[2]) / float(len(histy))
    axHisty.plot(histy,
                 sc.linspace(extent[2] + dy, extent[3] - dy, len(histy)),
                 drawstyle='steps-mid',
                 color=onedhistcolor)
    axHistx.set_xlim(axScatter.get_xlim())
    axHisty.set_ylim(axScatter.get_ylim())
    axHistx.set_ylim(0, 1.2 * sc.amax(histx))
    axHisty.set_xlim(0, 1.2 * sc.amax(histy))
    if retCumImage:
        return cntrThis
    elif retAxes:
        return (axScatter, axHistx, axHisty)
    else:
        return out
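A minimal usage sketch, assuming the surrounding module (with its pyplot/sc imports and the _add_axislabels/_add_ticks helpers) is importable; it renders a 2-D Gaussian with probability-mass contours:

import numpy
from matplotlib import pyplot

x = numpy.linspace(-3, 3, 101)
XX, YY = numpy.meshgrid(x, x)
dens = numpy.exp(-(XX ** 2 + YY ** 2) / 2.)   # unnormalized 2-D Gaussian

bovy_dens2d(dens, xrange=[-3, 3], yrange=[-3, 3],
            contours=True, cntrmass=True, cntrcolors='w',
            xlabel=r'x', ylabel=r'y', colorbar=True)
pyplot.savefig('dens2d_sketch.png')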
示例#40
0
def train(X,
          K,
          y,
          mu,
          mu2,
          group=[[0, 1], [2, 3, 4]],
          numintervals=100,
          ldeltamin=-5,
          ldeltamax=5,
          rho=1,
          alpha=1,
          debug=False):
    """
    train linear mixed model lasso

    Input:
    X: SNP matrix: n_s x n_f
    y: phenotype:  n_s x 1
    K: kinship matrix: n_s x n_s
    mu: l1-penalty parameter
    numintervals: number of intervals for delta linesearch
    ldeltamin: minimal delta value (log-space)
    ldeltamax: maximal delta value (log-space)
    rho: augmented Lagrangian parameter for Lasso solver
    alpha: over-relaxation parameter (typically ranges between 1.0 and 1.8) for Lasso solver

    Output:
    results
    """

    time_start = time.time()
    [n_s, n_f] = X.shape
    assert X.shape[0] == y.shape[0], 'dimensions do not match'
    assert K.shape[0] == K.shape[1], 'dimensions do not match'
    assert K.shape[0] == X.shape[0], 'dimensions do not match'
    if y.ndim == 1:
        y = SP.reshape(y, (n_s, 1))

    # train null model
    S, U, ldelta0, monitor_nm = train_nullmodel(y,
                                                K,
                                                numintervals,
                                                ldeltamin,
                                                ldeltamax,
                                                debug=debug)

    # train lasso on residuals
    delta0 = SP.exp(ldelta0)
    Sdi = 1. / (S + delta0)
    Sdi_sqrt = SP.sqrt(Sdi)
    SUX = SP.dot(U.T, X)
    SUX = SUX * SP.tile(Sdi_sqrt, (n_f, 1)).T
    SUy = SP.dot(U.T, y)
    SUy = SUy * SP.reshape(Sdi_sqrt, (n_s, 1))

    w, monitor_lasso = train_lasso(SUX,
                                   SUy,
                                   mu,
                                   mu2,
                                   group,
                                   rho,
                                   alpha,
                                   debug=debug)

    time_end = time.time()
    time_diff = time_end - time_start
    print '... finished in %.2fs' % (time_diff)

    res = {}
    res['ldelta0'] = ldelta0
    res['weights'] = w
    res['time'] = time_diff
    res['monitor_lasso'] = monitor_lasso
    res['monitor_nm'] = monitor_nm
    return res
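The rotation/rescaling step above (building SUX and SUy) is a whitening transform: with K = U diag(S) U^T, multiplying by diag(1/sqrt(S + delta)) U^T turns the correlated covariance K + delta*I into the identity, so the lasso can then be run on ordinary (uncorrelated) residuals. A small numeric check of that identity:

import numpy as np

n = 5
A = np.random.randn(n, n)
K = A.dot(A.T)                  # a kinship-like PSD matrix
delta = 0.5
S, U = np.linalg.eigh(K)
W = (1. / np.sqrt(S + delta))[:, None] * U.T   # whitening transform

V = W.dot(K + delta * np.eye(n)).dot(W.T)
print(np.allclose(V, np.eye(n)))   # True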
示例#41
0
def gcbias(filelist, fileoutlist, bedfilelist):
    """************************************************************************************************************************************************************
	Task: draws coverage as a function of gc content
	Input:
		filelist: list of strings, each containing the full path of the bam file to analyze.
		fileoutlist: list of strings, each containing the full path of the png file where the corresponding figure will be saved.
		bedfilelist: 
	Output: a png file will be created named "fileout" where a graph that compares gc content and mean coverage will be saved.	
	************************************************************************************************************************************************************"""

    pid = str(os.getpid())

    numpy.random.seed(1)
    ntotal_positions = []
    bamlist = []

    # Process each file and store counting results
    for filename in filelist:
        # Check whether index already exists for the bam file, needed for pysam use
        if (not os.path.isfile(filename + '.bai')):
            print 'Creating index for ' + filename
            pysam.index(filename)
            print '	Done.'

        bamlist.append(bam_file.bam_file(filename))
    sizes = numpy.array([bam.nreads() for bam in bamlist])
    minsize = sizes.min()

    print 'The smaller bam is ' + filelist[
        sizes.argmin()] + ' and contains ' + str(minsize) + ' reads.'

    # Process each file and store counting results
    for i, bamfile in enumerate(bamlist):

        print 'Processing ' + bamfile.filename
        print 'Results will be written at ' + fileoutlist[i]

        # Check whether normalization should be run
        if (normalize): normalizedbam = bamfile.normalize(minsize)
        else: normalizedbam = bamfile

        coveragefile = TMP + '/' + pid + '.coverage'
        print 'Calculating coverage per position...'
        run(BEDTOOLSPATH + 'coverageBed -d -abam ' + normalizedbam.filename +
            ' -b ' + bedfilelist[i] + ' > ' + coveragefile)

        coverage = region_coverage(coveragefile)

        print 'Calculating nt content...'
        bedfd = pybedtools.BedTool(bedfilelist[i])
        pybedtools._bedtools_installed = True
        pybedtools.set_bedtools_path(BEDTOOLSPATH)
        ntcontent = bedfd.nucleotide_content(REF)

        # Each entry in ntcontent is parsed to extract the gc content of each exon
        gccontent = {}
        for entry in ntcontent:
            gccontent[(entry.fields[0], string.atoi(
                entry.fields[1]), string.atoi(
                    entry.fields[2]))] = string.atof(entry.fields[-8]) * 100
        print '	Done.'

        fig = pyplot.figure(figsize=(13, 6))
        ax = fig.add_subplot(111)

        region_ids = coverage.keys()
        coveragearray = numpy.array([coverage[id] for id in region_ids])
        gccontentarray = numpy.array([gccontent[id]
                                      for id in region_ids])  # Values in [0,1]

        xmin = gccontentarray.min()
        xmax = gccontentarray.max(
        )  # Due to the imshow sentence, we need to rescale gccontent from [0,1] to [0,100]
        ymin = coveragearray.min()
        ymax = coveragearray.max()

        # Perform a kernel density estimator on the results
        X, Y = mgrid[xmin:xmax:100j, ymin:ymax:100j]
        positions = c_[X.ravel(), Y.ravel()]
        values = c_[gccontentarray, coveragearray]
        kernel = stats.kde.gaussian_kde(values.T)
        Z = reshape(kernel(positions.T).T, X.T.shape)

        fig = pyplot.figure(figsize=(6, 6))
        ax = fig.add_subplot(111)
        sc = ax.imshow(
            rot90(Z),
            cmap=cm.gist_earth_r,
            extent=[xmin, 100, ymin, ymax],
            aspect="auto"
        )  # Due to the imshow sentence, we need to rescale gccontent from [0,1] to [0,100]
        cbar = fig.colorbar(sc, ticks=[numpy.min(Z), numpy.max(Z)])
        cbar.ax.set_yticklabels(['Low', 'High'])
        cbar.set_label('Density')
        ax.set_xlabel('GC content (%)')
        ax.set_ylabel('Mean coverage')
        fig.savefig(fileoutlist[i])
        matplotlib.pyplot.close(fig)

    print '>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Finished <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'
示例#42
0
def randomize_colors(im, keep_vals=[0]):
    r'''
    Takes a greyscale image and randomly shuffles the greyscale values, so that
    all voxels labeled X will be labelled Y, and all voxels labeled Y will be
    labeled Z, where X, Y, Z and so on are randomly selected from the values
    in the input image.

    This function is useful for improving the visibility of images with
    neighboring regions that are only incrementally different from each other,
    such as that returned by `scipy.ndimage.label`.

    Parameters
    ----------
    im : array_like
        An ND image of greyscale values.

    keep_vals : array_like
        Indicate which voxel values should NOT be altered.  The default is
        `[0]` which is useful for leaving the background of the image
        untouched.

    Returns
    -------
    An image the same size and type as `im` but with the greyscale values
    reassigned.  The unique values in both the input and output images will
    be identical.

    Notes
    -----
    If the greyscale values in the input image are not contiguous then
    neither will they be in the output.

    Examples
    --------
    >>> import porespy as ps
    >>> import scipy as sp
    >>> sp.random.seed(0)
    >>> im = sp.random.randint(low=0, high=5, size=[4, 4])
    >>> print(im)
    [[4 0 3 3]
     [3 1 3 2]
     [4 0 0 4]
     [2 1 0 1]]
    >>> im_rand = ps.tools.randomize_colors(im)
    >>> print(im_rand)
    [[2 0 4 4]
     [4 1 4 3]
     [2 0 0 2]
     [3 1 0 1]]

    As can be seen, the 2's have become 3, 3's have become 4, and 4's have
    become 2.  1's remained 1 by random accident.  0's remain zeros by default,
    but this can be controlled using the `keep_vals` argument.

    '''
    im_flat = im.flatten()
    keep_vals = sp.array(keep_vals)
    swap_vals = ~sp.in1d(im_flat, keep_vals)
    im_vals = sp.unique(im_flat[swap_vals])
    new_vals = sp.random.permutation(im_vals)
    im_map = sp.zeros(shape=[
        sp.amax(im_vals) + 1,
    ], dtype=int)
    im_map[im_vals] = new_vals
    im_new = im_map[im_flat]
    im_new = sp.reshape(im_new, newshape=sp.shape(im))
    return im_new
示例#43
0
def cylinder_from_point_directionvector_length_and_radius(  vector, 
                                                            directionvector,
                                                            length,
                                                            radius ):
    """
    Creates a cylinder utilising OCC.BRepPrimAPI.BRepPrimAPI_MakeCylinder out 
    of scipy arrays
    
    @param vector: vector of the starting point on the cylinder main axis
    @type  vector: scipy array(3,1)
    @param directionvector: direction vector of the cylinder main axis
    @type  directionvector: scipy array(3,1)
    @param length: cylinder length
    @type  length: float
    @param radius: cylinder radius
    @type  radius: float
    @return: cylinder

    sample::
    
        Vector = scipy.array([ 10.0, 10.0, 10.0 ])
        Vector = scipy.reshape( Vector, (3,1))
        
        TangUnitVector = scipy.array([ (1/scipy.sqrt(3)) ,
                                       (1/scipy.sqrt(3)) ,
                                       (1/scipy.sqrt(3)) ])
        TangUnitVector = scipy.reshape( TangUnitVector, (3,1))

        CylLength = 200
        CylRadius = 3
    
        Cyli = cylinder_from_point_directionvector_length_and_radius(Vector, 
                                                                     TangUnitVector,
                                                                     CylLength,
                                                                     CylRadius ) 
        CyliShape = Cyli.Shape()
        display.DisplayColoredShape( CyliShape , 'RED' ) 
    """
    # Normalize the direction
    directionunitvector = NormVector(directionvector)
    # determine the second point
    vector2 = vector + length * directionunitvector    
    # components of vector in float values
    X1 = float( vector[ 0, 0] )
    Y1 = float( vector[ 1, 0] )
    Z1 = float( vector[ 2, 0] )
    # components of vector2 in float values
    X2 = float( vector2[ 0, 0] )
    Y2 = float( vector2[ 1, 0] )
    Z2 = float( vector2[ 2, 0] )
    # create OCC.gp.gp_Pnt-points
    P1 = OCC.gp.gp_Pnt( X1, Y1, Z1 )
    P2 = OCC.gp.gp_Pnt( X2, Y2, Z2 )
    # create direction unit vector from these points (not necessary but if 
    # the directionunitvector is not of length 1 ...)
    directionP1P2 = scipy.array([   ( X2 - X1 ),
                                    ( Y2 - Y1 ),  
                                    ( Z2 - Z1 ) ])
    directionP1P2 = scipy.reshape( directionP1P2,(3,1))
    # distance between the points (X1, Y1, Z1) and (X2, Y2, Z2)
    length = length_column_vector(directionP1P2)
    # normalize direction
    directionP1P2 = NormVector(directionP1P2)                                                
    # origin at point 1 with OCC.gp.gp_Pnt
    origin_local_coordinate_system = OCC.gp.gp_Pnt( X1, Y1, Z1)
    # z-direction of the local coordinate system with OCC.gp.gp_Dir
    z_direction_local_coordinate_system = OCC.gp.gp_Dir(directionP1P2[0, 0], 
                                                        directionP1P2[1, 0], 
                                                        directionP1P2[2, 0])
    # local coordinate system with OCC.gp.gp_Ax2
    local_coordinate_system = OCC.gp.gp_Ax2(origin_local_coordinate_system, 
                                            z_direction_local_coordinate_system)
    # create cylinder utilising OCC.BRepPrimAPI.BRepPrimAPI_MakeCylinder
    cylinder = OCC.BRepPrimAPI.BRepPrimAPI_MakeCylinder(local_coordinate_system, 
                                                        radius, 
                                                        length, 
                                                        2 * scipy.pi )
    # return cylinder
    return cylinder
示例#44
0
    def get_covariances(self,hyperparams,debugging=False):
        """
        INPUT:
        hyperparams:  dictionary
        OUTPUT: dictionary with the fields
        Kr:     kernel on rows
        Kc:     kernel on columns
        Knoise: noise kernel
        """
        if self._is_cached(hyperparams):
            return self._covar_cache
        if self._covar_cache is None:
            self._covar_cache = {}
            
        if not(self._is_cached(hyperparams,keys=['covar_c'])):
            K_c = self.covar_c.K(hyperparams['covar_c'])
            S_c,U_c = LA.eigh(K_c)
            self._covar_cache['K_c'] = K_c
            self._covar_cache['U_c'] = U_c
            self._covar_cache['S_c'] = S_c
        else:
            K_c = self._covar_cache['K_c']
            U_c = self._covar_cache['U_c']
            S_c = self._covar_cache['S_c']
            
        if not(self._is_cached(hyperparams,keys=['covar_r'])):
            K_r = self.covar_r.K(hyperparams['covar_r'])
            S_r,U_r = LA.eigh(K_r)
            self._covar_cache['K_r'] = K_r
            self._covar_cache['U_r'] = U_r
            self._covar_cache['S_r'] = S_r
        else:
            K_r = self._covar_cache['K_r']
            U_r = self._covar_cache['U_r']
            S_r = self._covar_cache['S_r']

        S = SP.kron(S_c,S_r) + self.likelihood.Kdiag(hyperparams['lik'],self.nt)
        UYU = SP.dot(U_r.T,SP.dot(self.Y,U_c))
        YtildeVec = (1./S)*ravel(UYU)
        self._covar_cache['S'] = S
        self._covar_cache['UYU'] = UYU
        self._covar_cache['Ytilde'] = unravel(YtildeVec,self.n,self.t)

        
        if debugging:
            # test ravel operations
            UYUvec = ravel(UYU)
            UYU2 = unravel(UYUvec,self.n,self.t)
            SP.allclose(UYU2,UYU)

            # needed later
            Yvec = ravel(self.Y)
            K_noise = self.likelihood.K(hyperparams['lik'],self.nt) # only works for iid noise
            K = SP.kron(K_c,K_r) + K_noise
            #L = LA.cholesky(K).T
            L = jitChol(K)[0].T # lower triangular
            alpha = LA.cho_solve((L,True),Yvec)
            alpha2D = SP.reshape(alpha,(self.nt,1))
            Kinv = LA.cho_solve((L,True),SP.eye(self.nt))
            W = Kinv - SP.dot(alpha2D,alpha2D.T)
            self._covar_cache['Yvec'] = Yvec
            self._covar_cache['K'] = K
            self._covar_cache['Kinv'] = Kinv
            self._covar_cache['L'] = L
            self._covar_cache['alpha'] = alpha
            self._covar_cache['W'] = W

        self._covar_cache['hyperparams'] = copy.deepcopy(hyperparams)
        return self._covar_cache
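The cached quantity S = kron(S_c, S_r) + noise relies on the Kronecker eigen-identity that makes the n*t x n*t solve cheap: the eigenvalues of kron(K_c, K_r) are exactly the pairwise products of the two small spectra. A quick numeric check:

import numpy as np

Kc = np.cov(np.random.randn(3, 10))   # small column covariance
Kr = np.cov(np.random.randn(4, 10))   # small row covariance
Sc = np.linalg.eigvalsh(Kc)
Sr = np.linalg.eigvalsh(Kr)

big = np.linalg.eigvalsh(np.kron(Kc, Kr))
small = np.sort(np.kron(Sc, Sr))
print(np.allclose(big, small))   # True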
示例#45
0
        spt = f.readline().split()
        m = float(spt[1])
        # met = float(spt[2]) # 2, 3, 4: metal, HI, OVI
        met = float(spt[4]) # 2, 3, 4: metal, HI, OVI
        if(not TOTAL_METAL_MASS):
            if m > 0:
                met = met / m
            else: met = 0.
        if met == 0:
            if(TOTAL_METAL_MASS): z.append(-20)
            else: z.append(-6)
        else:
            if(TOTAL_METAL_MASS): z.append(log10(met))
            else: z.append(log10(met/Z_solar)) # METALS
    x, y = meshgrid(xnodes, ynodes)
    z = reshape(z, (ncells_x, ncells_y)).T
    z2 = ndimage.gaussian_filter(z, sigma=3., order=0)
    cont = plt.contourf(x, y, z2, CONTLEVELS, cmap=plt.get_cmap("Purples"))
    f.close()
    plt.colorbar(ticks=[-5,-3,-1, 1], orientation="horizontal")
    plt.contour(x, y, z2, cont.levels[4::2], colors="#393939", linestyles="solid")
    
#    ax2.callbacks.connect("xlim_changed", update_ax2)
    plt.axhline(5, xnodes[0], xnodes[-1], linestyle=":", color="black")
    plt.plot([rhoth, rhoth], [ynodes[0], ynodes[-1]], "k:")
       
    
#     plt.plot(xnodes, zhist, "k-")

    ax.set_xlim(xnodes[0], xnodes[-1])
    ax.set_ylim(ynodes[0], ynodes[-1])
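The fragment above has lost its enclosing function; the following is a self-contained sketch of its smoothing-plus-filled-contour pattern, with random data standing in for the metallicity grid and hypothetical node arrays:

import numpy as np
from scipy import ndimage
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

xnodes = np.linspace(-2, 8, 50)
ynodes = np.linspace(2, 8, 40)
x, y = np.meshgrid(xnodes, ynodes)
z = np.random.rand(40, 50)                          # stand-in for the z grid
z2 = ndimage.gaussian_filter(z, sigma=3., order=0)  # smooth before contouring
cont = plt.contourf(x, y, z2, 10, cmap=plt.get_cmap('Purples'))
plt.contour(x, y, z2, cont.levels[4::2], colors='#393939', linestyles='solid')
plt.savefig('contour_sketch.png')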
示例#46
0
def lognet(x, is_sparse, irs, pcs, y, weights, offset, parm, 
          nobs, nvars, jd, vp, cl, ne, nx, nlam, flmin, ulam, 
          thresh, isd, intr, maxit, kopt, family):

    # load shared fortran library
    glmlib = loadGlmLib() 
    
    # 
    noo = y.shape[0]
    if len(y.shape) > 1:
        nc = y.shape[1]
    else:
        nc = 1
        
    if (noo != nobs):
        raise ValueError('x and y have different number of rows in call to glmnet')
    
    if nc == 1:
        classes, sy = scipy.unique(y, return_inverse = True)
        nc = len(classes)
        indexes = scipy.eye(nc, nc)
        y = indexes[sy, :]
    else:
        classes = scipy.arange(nc) + 1 # 1:nc
    #
    if family == 'binomial':
        if nc > 2:
            raise ValueError('More than two classes in y. use multinomial family instead')
        else:
            nc = 1
            y = y[:, [1, 0]]
    #
    if (len(weights) != 0): 
        t = weights > 0
        if ~scipy.all(t):
            t = scipy.reshape(t, (len(y), ))
            y = y[t, :]
            x = x[t, :]
            weights = weights[t]
            nobs = scipy.sum(t)
        else:
            t = scipy.empty([0], dtype = scipy.integer)
        #
        if len(y.shape) == 1:
            mv = len(y)
            ny = 1
        else:    
            mv, ny = y.shape 
            
        y = y*scipy.tile(weights, (1, ny))
    
    #
    if len(offset) == 0:
        offset = y*0
        is_offset = False
    else:
        if len(t) != 0:
            offset = offset[t, :]
        do = offset.shape
        if do[0] != nobs:
            raise ValueError('offset should have the same number of values as observations in binominal/multinomial call to glmnet')
        if nc == 1:
            if do[1] == 1:
                offset = scipy.column_stack((offset, -offset))
            if do[1] > 2:
                raise ValueError('offset should have 1 or 2 columns in binomial call to glmnet')
        if (family == 'multinomial') and (do[1] != nc):
            raise ValueError('offset should have same shape as y in multinomial call to glmnet')
        is_offset = True
  
    # now convert types and allocate memory before calling 
    # glmnet fortran library
    ######################################
    # --------- PROCESS INPUTS -----------
    ######################################
    # force inputs into fortran order and scipy float64
    copyFlag = False
    x = x.astype(dtype = scipy.float64, order = 'F', copy = copyFlag) 
    irs = irs.astype(dtype = scipy.int32, order = 'F', copy = copyFlag)
    pcs = pcs.astype(dtype = scipy.int32, order = 'F', copy = copyFlag)    
    y = y.astype(dtype = scipy.float64, order = 'F', copy = copyFlag)    
    weights = weights.astype(dtype = scipy.float64, order = 'F', copy = copyFlag)    
    offset = offset.astype(dtype = scipy.float64, order = 'F', copy = copyFlag)    
    jd = jd.astype(dtype = scipy.int32, order = 'F', copy = copyFlag)        
    vp = vp.astype(dtype = scipy.float64, order = 'F', copy = copyFlag)    
    cl = cl.astype(dtype = scipy.float64, order = 'F', copy = copyFlag)    
    ulam   = ulam.astype(dtype = scipy.float64, order = 'F', copy = copyFlag)    

    ######################################
    # --------- ALLOCATE OUTPUTS ---------
    ######################################
    # lmu
    lmu = -1
    lmu_r = ctypes.c_int(lmu)
    # a0, ca
    if nc == 1:
        a0   = scipy.zeros([nlam], dtype = scipy.float64)
        ca = scipy.zeros([nx, nlam], dtype = scipy.float64)
    else:
        a0   = scipy.zeros([nc, nlam], dtype = scipy.float64)
        ca   = scipy.zeros([nx, nc, nlam], dtype = scipy.float64)
    # a0
    a0   = a0.astype(dtype = scipy.float64, order = 'F', copy = False)    
    a0_r = a0.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
    # ca    
    ca   = ca.astype(dtype = scipy.float64, order = 'F', copy = False)    
    ca_r = ca.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
    # ia
    ia   = -1*scipy.ones([nx], dtype = scipy.int32)
    ia   = ia.astype(dtype = scipy.int32, order = 'F', copy = False)    
    ia_r = ia.ctypes.data_as(ctypes.POINTER(ctypes.c_int))
    # nin
    nin   = -1*scipy.ones([nlam], dtype = scipy.int32)
    nin   = nin.astype(dtype = scipy.int32, order = 'F', copy = False)    
    nin_r = nin.ctypes.data_as(ctypes.POINTER(ctypes.c_int))
    # dev
    dev   = -1*scipy.ones([nlam], dtype = scipy.float64)
    dev   = dev.astype(dtype = scipy.float64, order = 'F', copy = False)    
    dev_r = dev.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
    # alm
    alm   = -1*scipy.ones([nlam], dtype = scipy.float64)
    alm   = alm.astype(dtype = scipy.float64, order = 'F', copy = False)    
    alm_r = alm.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
    # nlp
    nlp = -1
    nlp_r = ctypes.c_int(nlp)
    # jerr
    jerr = -1
    jerr_r = ctypes.c_int(jerr)
    # dev0
    dev0 = -1
    dev0_r = ctypes.c_double(dev0)

    #  ###################################
    #   main glmnet fortran caller
    #  ###################################  
    if is_sparse:
        # sparse lognet
        glmlib.splognet_( 
              ctypes.byref(ctypes.c_double(parm)), 
              ctypes.byref(ctypes.c_int(nobs)),
              ctypes.byref(ctypes.c_int(nvars)),
              ctypes.byref(ctypes.c_int(nc)),
              x.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
              pcs.ctypes.data_as(ctypes.POINTER(ctypes.c_int)),  
              irs.ctypes.data_as(ctypes.POINTER(ctypes.c_int)), 
              y.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), 
              offset.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), 
              jd.ctypes.data_as(ctypes.POINTER(ctypes.c_int)), 
              vp.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), 
              cl.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), 
              ctypes.byref(ctypes.c_int(ne)), 
              ctypes.byref(ctypes.c_int(nx)), 
              ctypes.byref(ctypes.c_int(nlam)), 
              ctypes.byref(ctypes.c_double(flmin)), 
              ulam.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), 
              ctypes.byref(ctypes.c_double(thresh)), 
              ctypes.byref(ctypes.c_int(isd)), 
              ctypes.byref(ctypes.c_int(intr)), 
              ctypes.byref(ctypes.c_int(maxit)), 
              ctypes.byref(ctypes.c_int(kopt)), 
              ctypes.byref(lmu_r),
              a0_r, 
              ca_r, 
              ia_r, 
              nin_r, 
              ctypes.byref(dev0_r),
              dev_r,
              alm_r, 
              ctypes.byref(nlp_r), 
              ctypes.byref(jerr_r)
              )
    else:
        # call fortran lognet routine
        glmlib.lognet_( 
              ctypes.byref(ctypes.c_double(parm)), 
              ctypes.byref(ctypes.c_int(nobs)),
              ctypes.byref(ctypes.c_int(nvars)),
              ctypes.byref(ctypes.c_int(nc)),
              x.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), 
              y.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), 
              offset.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), 
              jd.ctypes.data_as(ctypes.POINTER(ctypes.c_int)), 
              vp.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), 
              cl.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), 
              ctypes.byref(ctypes.c_int(ne)), 
              ctypes.byref(ctypes.c_int(nx)), 
              ctypes.byref(ctypes.c_int(nlam)), 
              ctypes.byref(ctypes.c_double(flmin)), 
              ulam.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), 
              ctypes.byref(ctypes.c_double(thresh)), 
              ctypes.byref(ctypes.c_int(isd)), 
              ctypes.byref(ctypes.c_int(intr)), 
              ctypes.byref(ctypes.c_int(maxit)), 
              ctypes.byref(ctypes.c_int(kopt)), 
              ctypes.byref(lmu_r),
              a0_r, 
              ca_r, 
              ia_r, 
              nin_r, 
              ctypes.byref(dev0_r),
              dev_r,
              alm_r, 
              ctypes.byref(nlp_r), 
              ctypes.byref(jerr_r)
              )
   
    #  ###################################
    #   post process results
    #  ###################################  
    
    # check for error
    if (jerr_r.value > 0):
        raise ValueError("Fatal glmnet error in library call : error code = ", jerr_r.value)
    elif (jerr_r.value < 0):
        print("Warning: Non-fatal error in glmnet library call: error code = ", jerr_r.value)
        print("Check results for accuracy. Partial or no results returned.")
    
    # clip output to correct sizes
    lmu = lmu_r.value
    if nc == 1:
        a0 = a0[0:lmu]
        ca = ca[0:nx, 0:lmu]    
    else:
        a0 = a0[0:nc, 0:lmu]
        ca = ca[0:nx, 0:nc, 0:lmu]    
    ia = ia[0:nx]
    nin = nin[0:lmu]
    dev = dev[0:lmu]
    alm = alm[0:lmu]
    
    # ninmax
    ninmax = max(nin)
    # fix first value of alm (from inf to correct value)
    if ulam[0] == 0.0:
        t1 = scipy.log(alm[1])
        t2 = scipy.log(alm[2])
        alm[0] = scipy.exp(2*t1 - t2)        
    # create return fit dictionary
     
    if family == 'multinomial':
        a0 = a0 - scipy.tile(scipy.mean(a0), (nc, 1))
        dfmat = a0.copy()
        dd = scipy.array([nvars, lmu], dtype = scipy.integer)
        beta_list = list()
        if ninmax > 0:
            # TODO: is the reshape here done right?
            ca = scipy.reshape(ca, (nx, nc, lmu))
            ca = ca[0:ninmax, :, :]
            ja = ia[0:ninmax] - 1    # ia is 1-indexed in fortran
            oja = scipy.argsort(ja)
            ja1 = ja[oja]
            df = scipy.any(scipy.absolute(ca) > 0, axis=1)
            df = scipy.sum(df, axis=0)
            df = scipy.reshape(df, (1, df.size))
            for k in range(0, nc):
                ca1 = scipy.reshape(ca[:,k,:], (ninmax, lmu))
                cak = ca1[oja,:]
                dfmat[k, :] = scipy.sum(scipy.absolute(cak) > 0, axis = 0)
                beta = scipy.zeros([nvars, lmu], dtype = scipy.float64)
                beta[ja1, :] = cak
                beta_list.append(beta)
        else:
            for k in range(0, nc):
                dfmat[k, :] = scipy.zeros([1, lmu], dtype = scipy.float64)
                beta_list.append(scipy.zeros([nvars, lmu], dtype = scipy.float64))
            #
            df = scipy.zeros([1, lmu], dtype = scipy.float64)
        #
        if kopt == 2:
            grouped = True
        else:
            grouped = False
        #        
        fit = dict()
        fit['a0'] = a0
        fit['label'] = classes
        fit['beta'] = beta_list
        fit['dev'] = dev
        fit['nulldev'] = dev0_r.value
        fit['dfmat']= dfmat
        fit['df'] = df
        fit['lambdau'] = alm
        fit['npasses'] = nlp_r.value
        fit['jerr'] = jerr_r.value
        fit['dim'] = dd
        fit['grouped'] = grouped
        fit['offset'] = is_offset
        fit['class'] = 'multnet'    
    else:
        dd = scipy.array([nvars, lmu], dtype = scipy.integer)
        if ninmax > 0:
            ca = ca[0:ninmax,:];
            df = scipy.sum(scipy.absolute(ca) > 0, axis = 0);
            ja = ia[0:ninmax] - 1; # ia is 1-indexed in fortran
            oja = scipy.argsort(ja)
            ja1 = ja[oja]
            beta = scipy.zeros([nvars, lmu], dtype = scipy.float64);
            beta[ja1, :] = ca[oja, :];
        else:
            beta = scipy.zeros([nvars,lmu], dtype = scipy.float64);
            df = scipy.zeros([1,lmu], dtype = scipy.float64);
        #
        fit = dict()
        fit['a0'] = a0
        fit['label'] = classes
        fit['beta'] = beta
        fit['dev'] = dev
        fit['nulldev'] = dev0_r.value
        fit['df'] = df
        fit['lambdau'] = alm
        fit['npasses'] = nlp_r.value
        fit['jerr'] = jerr_r.value
        fit['dim'] = dd
        fit['offset'] = is_offset
        fit['class'] = 'lognet'  
    
    
    #  ###################################
    #   return to caller
    #  ###################################  

    return fit
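One post-processing detail worth noting: when ulam[0] == 0 the first lambda comes back as inf and is replaced by extrapolating the geometric spacing of the sequence, since exp(2*log(a1) - log(a2)) == a1**2 / a2. A one-line check:

import numpy as np

alm = np.array([np.inf, 0.5, 0.25])
alm[0] = np.exp(2 * np.log(alm[1]) - np.log(alm[2]))
print(alm)   # [1.  0.5  0.25] -- continues the geometric spacing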
示例#47
0
    # initialize covariance functions
    covar_c = lowrank.LowRankCF(n_dimensions=n_latent)
    covar_s = lowrank.LowRankCF(n_dimensions=n_latent)
    covar_r = linear.LinearCF(n_dimensions=n_dimensions)
    covar_o = diag.DiagIsoCF(n_dimensions = n_dimensions)

    # true parameters
    X_c = SP.random.randn(n_tasks,n_latent)
    X_s = SP.random.randn(n_tasks,n_latent)
    X_r = SP.random.randn(n_train,n_dimensions)/SP.sqrt(n_dimensions)
    R = SP.dot(X_r,X_r.T)
    C = SP.dot(X_c,X_c.T)
    Sigma = SP.dot(X_s,X_s.T)
    K = SP.kron(C,R) + SP.kron(Sigma,SP.eye(n_train))
    y = SP.random.multivariate_normal(SP.zeros(n_tasks*n_train),K)
    Y = SP.reshape(y,(n_train,n_tasks),order='F')
    
    # initialization parameters
    hyperparams, Ifilter, bounds = initialize.init('GPkronsum_LIN',Y.T,X_r,{'n_c':n_latent, 'n_sigma':n_latent})
    
    # initialize gp and its covariance functions
    covar_r.X = X_r
    covar_o.X = X_r
    covar_o._K = SP.eye(n_train)
    covar_s.X = hyperparams['X_s']
    covar_c.X = hyperparams['X_c']
    gp = gp_kronsum.KronSumGP(covar_c=covar_c, covar_r=covar_r, covar_s=covar_s, covar_o=covar_o)
    gp.setData(Y=Y)  

    # optimize hyperparameters
    t_start = time.time()
Target = "Jupiter"

#Retrieve Target Parameters and create data paths
J = CF.Target_Parameters(
    "f:/Astronomy/Python Play/SpectroPhotometry/Spectroscopy/Target_Parameters.txt"
)
J.loadtargetparams(Target)
JupPath = CF.built_path(J)
JupPath.spectra(DateUT)

#Load response calibration and solar reference spectrum
Response = scipy.fromfile(file="../PolluxResponse20150123UT.txt",
                          dtype=float,
                          count=-1,
                          sep=" ")
Response = scipy.reshape(Response, [Response.size / 2, 2])
Response[:, 0] = Response[:, 0] / 10.
Response[:, 1] = pyasl.smooth(Response[:, 1], 3, 'flat')
MasterDispersion = (Response[(Response.size / 2 - 1), 0] -
                    Response[0, 0]) / (Response.size / 2 - 1)

Ref_g2v = scipy.loadtxt(JupPath.reference_path + J.SpecType,
                        dtype=float,
                        skiprows=3,
                        usecols=(0, 1))
Ref_g2v[:, 0] = Ref_g2v[:, 0] / 10.
Ref_g2v[:, 1] = pyasl.smooth(Ref_g2v[:, 1], 3, 'flat')

#Load comparison albedo spectrum from Karkoschka, 1994 (1993 observations)
Jupiter_Karkoschka1993 = scipy.fromfile(
    file=
示例#49
0
 def _forwardImplementation(self, inbuf, outbuf):
     outbuf += dot(reshape(self.params, (self.outdim, self.indim)), inbuf)
示例#50
0
    def _LMLgrad_covar_debug(self, covar):

        assert self.N * self.P < 5000, 'gp2kronSum:: N*P>=5000'

        y = SP.reshape(self.Y, (self.N * self.P), order='F')
        V = SP.kron(SP.eye(self.P), self.F)

        # calc K
        XX = SP.dot(self.Xr, self.Xr.T)
        K = SP.kron(self.Cr.K(), XX)
        K += SP.kron(self.Cn.K() + self.offset * SP.eye(self.P),
                     SP.eye(self.N))

        # inverse of K
        cholK = LA.cholesky(K)
        Ki = LA.cho_solve((cholK, False), SP.eye(self.N * self.P))

        # Areml and inverse
        KiV = SP.dot(Ki, V)
        Areml = SP.dot(V.T, KiV)
        cholAreml = LA.cholesky(Areml)
        Areml_i = LA.cho_solve((cholAreml, False), SP.eye(self.K * self.P))

        # effect sizes and z
        b = SP.dot(Areml_i, SP.dot(V.T, SP.dot(Ki, y)))
        Vb = SP.dot(V, b)
        z = y - Vb
        Kiz = SP.dot(Ki, z)
        Kiy = SP.dot(Ki, y)
        zKiV = SP.dot(z.T, KiV)

        if covar == 'Cr': n_params = self.Cr.getNumberParams()
        elif covar == 'Cn': n_params = self.Cn.getNumberParams()

        RV = SP.zeros(n_params)

        for i in range(n_params):
            #0. calc grad_i
            if covar == 'Cr':
                C = self.Cr.Kgrad_param(i)
                Kgrad = SP.kron(C, XX)
            elif covar == 'Cn':
                C = self.Cn.Kgrad_param(i)
                Kgrad = SP.kron(C, SP.eye(self.N))

            #0a. Areml grad
            Areml_grad = -SP.dot(KiV.T, SP.dot(Kgrad, KiV))

            #0b. beta grad
            b_grad = -SP.dot(Areml_i, SP.dot(Areml_grad, b))
            b_grad -= SP.dot(Areml_i, SP.dot(KiV.T, SP.dot(Kgrad, Kiy)))

            #1. der of log det
            RV[i] = 0.5 * (Ki * Kgrad).sum()

            #2. der of quad form
            RV[i] -= 0.5 * (Kiz * SP.dot(Kgrad, Kiz)).sum()
            RV[i] += SP.dot(zKiV, b_grad).sum()

            #3. der of reml term
            RV[i] += 0.5 * (Areml_i * Areml_grad).sum()

        return RV
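
# Standalone check (plain NumPy) of the trace identity behind step
# "#1. der of log det" above: for symmetric Ki = K^{-1},
# (Ki * Kgrad).sum() == trace(Ki.dot(Kgrad)), the directional derivative of
# log det K along Kgrad (the 0.5 factor in the code comes from the Gaussian
# log-likelihood).
import numpy as np

rng = np.random.RandomState(1)
A = rng.randn(5, 5)
K = A.dot(A.T) + 5 * np.eye(5)   # SPD covariance
Kgrad = np.eye(5)                # derivative of K w.r.t. a ridge parameter
Ki = np.linalg.inv(K)

analytic = (Ki * Kgrad).sum()    # elementwise form used in the code
eps = 1e-6
numeric = (np.linalg.slogdet(K + eps * Kgrad)[1]
           - np.linalg.slogdet(K - eps * Kgrad)[1]) / (2 * eps)
assert np.isclose(analytic, numeric, rtol=1e-4)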
Example #51
0
def _backwardImplementation(self, outerr, inerr, inbuf):
    inerr += dot(reshape(self.params, (self.outdim, self.indim)).T, outerr)
    ds = self.derivs
    ds += outer(inbuf, outerr).T.flatten()
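
# Standalone check (plain NumPy) of the gradient accumulation above: for
# out = W.dot(inp), the derivative of (outerr . out) w.r.t. W is
# outer(outerr, inbuf) == outer(inbuf, outerr).T, flattened in the same
# row-major layout as the params vector.
import numpy as np

indim, outdim = 3, 2
W = np.arange(indim * outdim, dtype=float).reshape(outdim, indim)
inbuf = np.array([1.0, 2.0, 3.0])
outerr = np.array([0.5, -1.0])           # error signal arriving at the output

analytic = np.outer(inbuf, outerr).T.flatten()

# finite-difference check against L(W) = outerr . (W inbuf)
eps = 1e-6
numeric = np.zeros(W.size)
for k in range(W.size):
    Wp = W.flatten(); Wp[k] += eps
    Wm = W.flatten(); Wm[k] -= eps
    f = lambda wflat: outerr.dot(wflat.reshape(outdim, indim).dot(inbuf))
    numeric[k] = (f(Wp) - f(Wm)) / (2 * eps)
assert np.allclose(analytic, numeric)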
Example #52
0
    def _update_cache(self):
        """
        Update cache
        """
        cov_params_have_changed = self.Cr.params_have_changed or self.Cn.params_have_changed

        if self.Xr_has_changed:
            start = TIME.time()
            """ Row SVD on small matrix """
            Ug, Sgh, Vg = NLA.svd(self.Xr, full_matrices=0)
            I = Sgh < self.tol
            if I.any():
                warnings.warn(
                    'Xr has dependent columns, dimensionality reduced')
                Sgh = Sgh[~I]
                Ug = Ug[:, ~I]
                Vg = SP.eye(Sgh.shape[0])
                Xr = Ug * Sgh[SP.newaxis, :]
                self.set_Xr(Xr)
            self.cache['Sg'] = Sgh**2
            self.cache['Wr'] = Ug.T
            self.cache['Vg'] = Vg
            self.cache['trXrXr'] = self.cache['Sg'].sum()

        if cov_params_have_changed:
            start = TIME.time()
            """ Col SVD on big matrix """
            self.cache['Sn'], Un = LA.eigh(self.Cn.K() +
                                           self.offset * SP.eye(self.P))
            self.cache['Lc'] = (self.cache['Sn']**(-0.5))[:, SP.newaxis] * Un.T
            E = SP.reshape(self.Cr.getParams(), (self.P, self.rank), order='F')
            Estar = SP.dot(self.cache['Lc'], E)
            Ue, Seh, Ve = NLA.svd(Estar, full_matrices=0)
            self.cache['Se'] = Seh**2
            self.cache['Wc'] = Ue.T

        if cov_params_have_changed or self.Xr_has_changed:
            """ S """
            self.cache['s'] = SP.kron(1. / self.cache['Se'],
                                      1. / self.cache['Sg']) + 1
            self.cache['d'] = 1. / self.cache['s']
            self.cache['D'] = SP.reshape(self.cache['d'], (self.S, self.rank),
                                         order='F')

        if self.Xr_has_changed or self.Y_has_changed:
            """ phenos transf """
            self.cache['WrLrY'] = SP.dot(self.cache['Wr'], self.Y)
            XrLrY = SP.dot(self.Xr.T, self.Y)
            self.cache['XrXrLrY'] = SP.dot(self.Xr, XrLrY)
            self.cache['WrXrXrLrY'] = (self.cache['Sg']**
                                       0.5)[:, SP.newaxis] * SP.dot(
                                           self.cache['Vg'], XrLrY)

        if (self.Xr_has_changed or self.F_has_changed) and self.F is not None:
            """ F transf """
            self.cache['FF'] = SP.dot(self.F.T, self.F)
            self.cache['WrLrF'] = SP.dot(self.cache['Wr'], self.F)
            XrLrF = SP.dot(self.Xr.T, self.F)
            self.cache['XrXrLrF'] = SP.dot(self.Xr, XrLrF)
            self.cache['FLrXrXrLrF'] = SP.dot(self.F.T, self.cache['XrXrLrF'])
            self.cache['WrXrXrLrF'] = (self.cache['Sg']**
                                       0.5)[:, SP.newaxis] * SP.dot(
                                           self.cache['Vg'], XrLrF)

        if (self.F_has_changed or self.Y_has_changed) and self.F is not None:
            self.cache['FY'] = SP.dot(self.F.T, self.Y)

        if (self.Xr_has_changed or self.F_has_changed
                or self.Y_has_changed) and self.F is not None:
            self.cache['FXrXrLrY'] = SP.dot(self.F.T, self.cache['XrXrLrY'])

        if cov_params_have_changed or self.Y_has_changed:
            """ phenos transf """
            self.cache['LY'] = SP.dot(self.Y, self.cache['Lc'].T)
            self.cache['WrLY'] = SP.dot(self.cache['WrLrY'],
                                        self.cache['Lc'].T)
            self.cache['WLY'] = SP.dot(self.cache['WrLY'], self.cache['Wc'].T)
            self.cache['XrXrLY'] = SP.dot(self.cache['XrXrLrY'],
                                          self.cache['Lc'].T)
            self.cache['WrXrXrLY'] = SP.dot(self.cache['WrXrXrLrY'],
                                            self.cache['Lc'].T)

        if cov_params_have_changed and self.F is not None:
            """ A transf """
            # A for now is just I
            self.cache['LcA'] = self.cache['Lc']
            self.cache['Cni'] = SP.dot(self.cache['Lc'].T, self.cache['Lc'])
            self.cache['LcALcA'] = self.cache['Cni']
            self.cache['WcLcA'] = SP.dot(self.cache['Wc'], self.cache['LcA'])

        if cov_params_have_changed or self.Xr_has_changed or self.Y_has_changed:
            self.cache['DWLY'] = self.cache['D'] * self.cache['WLY']
            self.cache['SgDWLY'] = self.cache[
                'Sg'][:, SP.newaxis] * self.cache['DWLY']
            smartSum(self.time, 'cache_colSVDpRot', TIME.time() - start)
            smartSum(self.count, 'cache_colSVDpRot', 1)

        if (cov_params_have_changed or self.Xr_has_changed
                or self.F_has_changed) and self.F is not None:
            self.cache['WLV'] = SP.kron(self.cache['WcLcA'],
                                        self.cache['WrLrF'])
            self.cache['DWLV'] = self.cache['d'][:, SP.newaxis] * self.cache['WLV']
            self.cache['DWLV_t'] = SP.reshape(
                self.cache['DWLV'], (self.S, self.rank, self.P * self.K),
                order='F')
            self.cache['SgDWLV_t'] = self.cache[
                'Sg'][:, SP.newaxis, SP.newaxis] * self.cache['DWLV_t']
            self.cache['Areml'] = SP.kron(self.cache['LcALcA'],
                                          self.cache['FF'])
            self.cache['Areml'] -= SP.dot(self.cache['WLV'].T,
                                          self.cache['DWLV'])
            self.cache['Areml_chol'] = LA.cholesky(self.cache['Areml']).T
            # TODO: handle pseudo inverses
            self.cache['Areml_inv'] = LA.cho_solve(
                (self.cache['Areml_chol'], True), SP.eye(self.K * self.P))

        if (cov_params_have_changed or self.Xr_has_changed
                or self.Y_has_changed
                or self.F_has_changed) and self.F is not None:
            VKiY = SP.dot(self.cache['FY'], self.cache['Cni'])
            #TODO: have not controlled factorization in the following line
            VKiY -= SP.dot(SP.dot(self.cache['WrLrF'].T, self.cache['DWLY']),
                           self.cache['WcLcA'])
            self.cache['b'] = SP.dot(
                self.cache['Areml_inv'],
                SP.reshape(VKiY, (VKiY.size, 1), order='F'))
            self.cache['B'] = SP.reshape(self.cache['b'], (self.K, self.P),
                                         order='F')
            self.cache['BLc'] = SP.dot(self.cache['B'], self.cache['Lc'].T)
            self.cache['BLcWc'] = SP.dot(self.cache['BLc'], self.cache['Wc'].T)
            self.cache['Z'] = self.Y - SP.dot(self.F, self.cache['B'])
            self.cache['FZ'] = self.cache['FY'] - SP.dot(
                self.cache['FF'], self.cache['B'])
            self.cache['LZ'] = self.cache['LY'] - SP.dot(
                self.F, self.cache['BLc'])
            self.cache['WrLZ'] = self.cache['WrLY'] - SP.dot(
                self.cache['WrLrF'], self.cache['BLc'])
            self.cache['WLZ'] = self.cache['WLY'] - SP.dot(
                self.cache['WrLrF'], self.cache['BLcWc'])
            self.cache['DWLZ'] = self.cache['D'] * self.cache['WLZ']
            self.cache['SgDWLZ'] = self.cache[
                'Sg'][:, SP.newaxis] * self.cache['DWLZ']
            self.cache['XrXrLZ'] = self.cache['XrXrLY'] - SP.dot(
                self.cache['XrXrLrF'], self.cache['BLc'])
            self.cache['WrXrXrLZ'] = self.cache['WrXrXrLY'] - SP.dot(
                self.cache['WrXrXrLrF'], self.cache['BLc'])
            VKiZ = SP.dot(self.cache['FZ'], self.cache['Cni'])
            VKiZ -= SP.dot(self.cache['WrLrF'].T,
                           SP.dot(self.cache['DWLZ'], self.cache['WcLcA']))
            self.cache['vecVKiZ'] = SP.reshape(VKiZ, (self.K * self.P, 1),
                                               order='F')

        if self.F is None:
            """ Then Z=Y """
            self.cache['LZ'] = self.cache['LY']
            self.cache['WLZ'] = self.cache['WLY']
            self.cache['DWLZ'] = self.cache['DWLY']
            self.cache['XrXrLZ'] = self.cache['XrXrLY']
            self.cache['SgDWLZ'] = self.cache['SgDWLY']
            self.cache['WrXrXrLZ'] = self.cache['WrXrXrLY']
            self.cache['WrLZ'] = self.cache['WrLY']

        self.Y_has_changed = False
        self.F_has_changed = False
        self.Xr_has_changed = False
        self.Cr.params_have_changed = False
        self.Cn.params_have_changed = False
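
# A compact sanity check (plain NumPy) of the structure the cache above
# exploits: for symmetric A (P x P) and B (N x N), eigendecomposing the
# factors diagonalizes their Kronecker product, so solves against
# kron-structured covariances reduce to elementwise work on the eigenvalue
# grid.
import numpy as np

rng = np.random.RandomState(2)
A = rng.randn(3, 3); A = A.dot(A.T)
B = rng.randn(4, 4); B = B.dot(B.T)

Sa, Ua = np.linalg.eigh(A)
Sb, Ub = np.linalg.eigh(B)
S = np.kron(Sa, Sb)              # eigenvalues of kron(A, B)
U = np.kron(Ua, Ub)              # eigenvectors of kron(A, B)
assert np.allclose(U.dot(np.diag(S)).dot(U.T), np.kron(A, B))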
Example #53
0
def write_gmm_data_file_depth(model_name, mag, dist, depth, result_type,
                              periods, file_out,
                              component_type="AVERAGE_HORIZONTAL",):
    """
    Create a file of input and output parameters for the sommerville GMM.

    params:
      model_name: The ground motion model, as a string.
      mag: dictionary, key - the mag column name, values, the mag vectors,
           as a list
      dist: dictionary, key - the distance column name, value,
            the distance vectors, as a list.
      depth: depth in km.
      result_type: MEAN or TOTAL_STDDEV
      periods: A list of periods requiring SA values.
               The first value has to be 0.0.

       Mag, distance and periods will be iterated over to give a single SA for
       each combination.
       file_out: The file name and location of the produced data file.
    """
    assert periods[0] == 0.0
    handle = open(file_out, 'wb')
    writer = csv.writer(handle, delimiter=',', quoting=csv.QUOTE_NONE)

    # write title
    title = [depth[0], mag[0], dist[0], 'result_type', 'component_type'] + \
        periods[1:] + ['pga']
    writer.writerow(title)

    # prepare the coefficients
    model = Ground_motion_specification(model_name)
    coeff = model.calc_coefficient(periods)
    coeff = reshape(coeff, (coeff.shape[0], 1, 1, coeff.shape[1]))
    sigma_coeff = model.calc_sigma_coefficient(periods)
    sigma_coeff = reshape(sigma_coeff, (sigma_coeff.shape[0], 1, 1,
                                        sigma_coeff.shape[1]))

    # Iterate
    for depi in depth[1]:
        for magi in mag[1]:
            for disti in dist[1]:
                dist_args = {'mag': array([[[magi]]]),
                             dist[0]: array([[[disti]]]),
                             'depth': array([[[depi]]]),
                             'coefficient': coeff,
                             'sigma_coefficient': sigma_coeff}
                log_mean, log_sigma = model.distribution(**dist_args)
                sa_mod = list(log_mean.reshape(-1))
                sa_mod = [math.exp(x) for x in sa_mod]
                sigma_mod = list(log_sigma.reshape(-1))
                if result_type == 'MEAN':
                    row = [depi, magi, disti, result_type, component_type] + \
                        sa_mod[1:] + \
                          [sa_mod[0]]
                else:
                    row = [depi, magi, disti, result_type, component_type] + \
                        sigma_mod[1:] + \
                          [sigma_mod[0]]

                writer.writerow(row)
    handle.close()
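
# A hypothetical invocation (the model name and output path are placeholders;
# an EQRM-style Ground_motion_specification must recognise the model name),
# illustrating the (column-name, values) convention for mag/dist/depth
# described in the docstring:
write_gmm_data_file_depth(
    model_name='example_model',
    mag=('magnitude', [5.0, 6.0, 7.0]),
    dist=('distance', [10.0, 50.0]),
    depth=('depth', [5.0, 15.0]),
    result_type='MEAN',
    periods=[0.0, 0.2, 1.0],
    file_out='gmm_data.csv')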
Example #54
0
def main(argv):
    # load preprocessed data samples
    print 'loading data...\t',
    #data_train, data_test = genData() #load('../data/vanhateren.npz')
    data_train_0,data_train_1,data_train_2, data_test = load_data_BS("data/changedetection\\baseline\highway\orignal_color.pkl")
    img = Image.open("data/changedetection\\baseline\highway/gt001367.png")
    print "Doie : " , (asarray(img)[:,:]).shape
    groundtruth = (((asarray(img)[:,:])/255.0 > 0.5) * 1).flatten()
    
    #groundtruth = (((asarray(img)[:,:])/255.0 > 0.5) * 1).flatten()
    print '[DONE]'
    


    # remove DC component (first component)
    # data_train = data['train'][1:, :]
    # data_test = data['test'][1:, :]

    # create 1st layer
    dbn = DBN(GaussianRBM(num_visibles=data_train_0.shape[0], num_hiddens=8))
    dbn1 = DBN(GaussianRBM(num_visibles=data_train_1.shape[0], num_hiddens=8))
    dbn2 = DBN(GaussianRBM(num_visibles=data_train_2.shape[0], num_hiddens=8))
    
    dbn[0].learning_rate = 0.0001
    dbn1[0].learning_rate = 0.0001
    dbn2[0].learning_rate = 0.0001


    # train 1st layer
    print 'training...\t',
    dbn.train(data_train_0, num_epochs=25, batch_size=1,shuffle=False)
    dbn1.train(data_train_1, num_epochs=25, batch_size=1,shuffle=False)
    dbn2.train(data_train_2, num_epochs=25, batch_size=1,shuffle=False)
    print '[DONE]'

    data_test_0 = ((data_test.T)[:,::3]).T
    data_test_1 = ((data_test.T)[:,1::3]).T
    data_test_2 = ((data_test.T)[:,2::3]).T
    
    Ndat = 20 #data_test_0.shape[1]
    Nsteps = 5
    # evaluate 1st layer
    print 'evaluating 1...\t',
    dataout = zeros(x_*y_)
    # #datasub = zeros(x_*y_)
    for point in xrange(Ndat):
        #X = asmatrix(data_test_0[:,point]).T
        X = asmatrix(data_test_0[:,-1]).T
        #dataout = vstack((dataout,X.flatten()))
        #print "testing:", X.shape
        for recstep in xrange(Nsteps): 
            Y = dbn[0].forward(X) # self.activ(1)
            X = dbn[0].backward(Y,X)
        #print "S hsape:", X.shape
        #dataout = vstack((dataout,X.flatten()))
        dataout = vstack((dataout,subtract(asarray(X),data_test_0[:,-1],asarray(dbn[0].vsigma),point+1)))
    
    print 'evaluating 2...\t',
    dataout1 = zeros(x_*y_)
    # #datasub = zeros(x_*y_)
    for point in xrange(Ndat):
        #X = asmatrix(data_test_1[:,point]).T
        X = asmatrix(data_test_1[:,-1]).T
        #dataout1 = vstack((dataout1,X.flatten()))
        #print "testing:", X.shape
        for recstep in xrange(Nsteps): 
            Y = dbn1[0].forward(X) # self.activ(1)
            X = dbn1[0].backward(Y,X)
        #print "S hsape:", X.shape
        #dataout1 = vstack((dataout1,X.flatten()))
        dataout1 = vstack((dataout1,subtract(asarray(X),data_test_1[:,-1],asarray(dbn1[0].vsigma),point+1)))
    
    
    print 'evaluating 3...\t',
    dataout2 = zeros(x_*y_)
    # #datasub = zeros(x_*y_)
    for point in xrange(Ndat):
        #X = asmatrix(data_test_2[:,point]).T
        X = asmatrix(data_test_2[:,-1]).T
        #dataout2 = vstack((dataout2,X.flatten()))
        #print "testing:", X.shape
        for recstep in xrange(Nsteps): 
            Y = dbn2[0].forward(X) # self.activ(1)
            X = dbn2[0].backward(Y,X)
        #print "S hsape:", X.shape
        #dataout2 = vstack((dataout2,X.flatten()))
        dataout2 = vstack((dataout2,subtract(asarray(X),data_test_2[:,-1],asarray(dbn2[0].vsigma),point+1)))
    
    # plt.imshow((reshape(data_test[::3,5],(x_,y_))), cmap = cm.Greys_r, interpolation ="nearest")
    # plt.axis('off')     
    # plt.show()

    plt.figure(1)
    for i in range(Ndat):
        plt.subplot(5,5,i+1)
        d = multiply(asarray(dataout[i+1,:]),asarray(dataout1[i+1,:]),asarray(dataout2[i+1,:]))
        d = mod(d+1,2)
        f_measure(d,groundtruth)
        #print "Image Example Fmeaure: ",i," : ", f_measure(d,groundtruth) * 100
        # d[0::3] = asarray(dataout[i+1,:])
        # d[1::3] = asarray(dataout1[i+1,:])
        # d[2::3] = asarray(dataout2[i+1,:])
        # d[:,:,0] = (reshape(asarray(dataout[i+1,:]),(x_,y_)))
        # d[:,:,1] = (reshape(asarray(dataout1[i+1,:]),(x_,y_)))
        # d[:,:,2] = (reshape(asarray(dataout2[i+1,:]),(x_,y_)))
        plt.imshow(reshape(d,(x_,y_)), cmap = cm.Greys_r, interpolation ="nearest")
        plt.axis('off')     
    plt.figure(2)
    

    for k in range(8):
        plt.subplot(4,2,k+1)
        d = zeros((x_*y_*3))
        d[0::3] = asarray(dbn[0].W[:,k].flatten())
        d[1::3] = asarray(dbn1[0].W[:,k].flatten())
        d[2::3] = asarray(dbn2[0].W[:,k].flatten())
        plt.imshow(reshape(d,(x_,y_,3)))#, cmap = cm.Greys_r, interpolation ="nearest")
        plt.axis('off')     
    # plt.figure()
    # plt.imshow((reshape(dbn[0].vsigma[:19200],(x_,y_))))
    
    # plt.figure(2)
    # plt.imshow((reshape(dbn[0].vsigma[19200:19200*2],(x_,y_))))
    
    # plt.figure(3)
    # plt.imshow((reshape(dbn[0].vsigma[19200*2:19200*3],(x_,y_))))
    
    plt.figure(3)
    print type(dbn[0].vsigma)
    plt.imshow(reshape(asarray(dbn[0].vsigma),(x_,y_)))
    plt.show()    

    
    print dbn[0].vsigma
    plt.show()
Example #55
0
    def _LMLgrad_covar(self, covar, **kw_args):
        """
        calculates LMLgrad for covariance parameters
        """
        # precompute some stuff
        if covar == 'Cr':
            trR = self.cache['trXrXr']
            RLZ = self.cache['XrXrLZ']
            SrDWLZ = self.cache['SgDWLZ']
            WrRLZ = self.cache['WrXrXrLZ']
            diagSr = self.cache['Sg']
            n_params = self.Cr.getNumberParams()
            if self.F is not None:
                SrDWLY = self.cache['SgDWLY']
                WrRLY = self.cache['WrXrXrLY']
                SrDWLV_t = self.cache['SgDWLV_t']
                WrRLF = self.cache['WrXrXrLrF']
                FRF = self.cache['FLrXrXrLrF']
                FRLrY = self.cache['FXrXrLrY']
        elif covar == 'Cn':
            trR = self.N
            RLZ = self.cache['LZ']
            SrDWLZ = self.cache['DWLZ']
            WrRLZ = self.cache['WrLZ']
            diagSr = SP.ones(self.S)
            n_params = self.Cn.getNumberParams()
            if self.F is not None:
                SrDWLY = self.cache['DWLY']
                WrRLY = self.cache['WrLY']
                SrDWLV = self.cache['DWLV']
                WrRLF = self.cache['WrLrF']
                SrDWLV_t = self.cache['DWLV_t']
                FRF = self.cache['FF']
                FRLrY = self.cache['FY']

        # fill gradient vector
        RV = SP.zeros(n_params)
        for i in range(n_params):

            #0. calc LCL
            start = TIME.time()
            if covar == 'Cr': C = self.Cr.Kgrad_param(i)
            elif covar == 'Cn': C = self.Cn.Kgrad_param(i)
            LCL = SP.dot(self.cache['Lc'], SP.dot(C, self.cache['Lc'].T))
            LLCLL = SP.dot(self.cache['Lc'].T, SP.dot(LCL, self.cache['Lc']))
            LCLW = SP.dot(LCL, self.cache['Wc'].T)
            WLCLW = SP.dot(self.cache['Wc'], LCLW)

            CoRLZ = SP.dot(RLZ, LCL.T)
            CoSrDWLZ = SP.dot(SrDWLZ, WLCLW.T)
            WCoRLZ = SP.dot(WrRLZ, LCLW)

            if self.F is not None:
                WcCLcA = SP.dot(SP.dot(self.cache['Wc'], LCL),
                                self.cache['LcA'])
                CoSrDWLY = SP.dot(SrDWLY, WLCLW.T)
                DCoSrDWLY = self.cache['D'] * CoSrDWLY
                WCoRLY = SP.dot(WrRLY, LCLW)
                DWCoRLY = self.cache['D'] * WCoRLY

                #0a. grad of Areml
                if 1:
                    Areml_grad = SP.dot(
                        SP.kron(WcCLcA, WrRLF).T, self.cache['DWLV'])
                else:
                    Areml_grad = SP.tensordot(SP.tensordot(
                        WrRLF, self.cache['DWLV_t'], axes=(0, 0)),
                                              WcCLcA,
                                              axes=(1, 0))
                    # and then resize...
                Areml_grad += Areml_grad.T
                Areml_grad -= SP.kron(LLCLL, FRF)  #TODO: think about LLCLL
                CoSrDWLV_t = SP.tensordot(SrDWLV_t, WLCLW, axes=(1, 1))
                Areml_grad -= SP.tensordot(self.cache['DWLV_t'],
                                           CoSrDWLV_t,
                                           axes=([0, 1], [0, 2]))

                #0b. grad of beta
                B_grad1 = -SP.dot(FRLrY, LLCLL)
                B_grad1 -= SP.dot(SP.dot(self.cache['WrLrF'].T, DCoSrDWLY),
                                  self.cache['WcLcA'])
                B_grad1 += SP.dot(SP.dot(WrRLF.T, self.cache['DWLY']), WcCLcA)
                B_grad1 += SP.dot(SP.dot(self.cache['WrLrF'].T, DWCoRLY),
                                  self.cache['WcLcA'])
                b_grad = SP.reshape(B_grad1, (self.K * self.P, 1), order='F')
                b_grad -= SP.dot(Areml_grad, self.cache['b'])
                b_grad = SP.dot(self.cache['Areml_inv'], b_grad)

            #1. der of log det
            start = TIME.time()
            trC = LCL.diagonal().sum()
            RV[i] = trC * trR
            RV[i] -= SP.dot(self.cache['d'], SP.kron(WLCLW.diagonal(), diagSr))
            smartSum(self.time, 'lmlgrad_trace', TIME.time() - start)
            smartSum(self.count, 'lmlgrad_trace', 1)

            #2. der of quad form
            start = TIME.time()
            RV[i] -= SP.sum(self.cache['LZ'] * CoRLZ)
            RV[i] -= SP.sum(self.cache['DWLZ'] * CoSrDWLZ)
            RV[i] += 2 * SP.sum(self.cache['DWLZ'] * WCoRLZ)
            if self.F is not None:
                RV[i] -= 2 * SP.dot(self.cache['vecVKiZ'].T, b_grad)
            smartSum(self.time, 'lmlgrad_quadform', TIME.time() - start)
            smartSum(self.count, 'lmlgrad_quadform', 1)

            if self.F is not None:
                #3. reml term
                RV[i] += (self.cache['Areml_inv'] * Areml_grad).sum()

            RV[i] *= 0.5

        return RV
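
# Standalone check (plain NumPy) of the quadratic-form derivative that step
# "#2. der of quad form" above implements in transformed coordinates: along a
# symmetric direction Kgrad, d(z' K^{-1} z) = -(Kiz * Kgrad.dot(Kiz)).sum()
# with Kiz = K^{-1} z.
import numpy as np

rng = np.random.RandomState(3)
A = rng.randn(6, 6)
K = A.dot(A.T) + 6 * np.eye(6)   # SPD covariance
Kgrad = rng.randn(6, 6)
Kgrad = Kgrad + Kgrad.T          # symmetric perturbation direction
z = rng.randn(6)

Kiz = np.linalg.solve(K, z)
analytic = -(Kiz * Kgrad.dot(Kiz)).sum()
eps = 1e-6
qf = lambda M: z.dot(np.linalg.solve(M, z))
numeric = (qf(K + eps * Kgrad) - qf(K - eps * Kgrad)) / (2 * eps)
assert np.isclose(analytic, numeric, rtol=1e-4, atol=1e-6)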
Example #56
0
def plot3Dslicempl(geodata, surfs, vbounds, titlestr='', time=0, gkey=None, cmap=defmap3d, ax=None, fig=None, method='linear',
                   fill_value=np.nan, view=None, units='', colorbar=False):
    """ This function create 3-D slice image given either a surface or list of coordinates to slice through
    Inputs:
    geodata - A geodata object that will be plotted in 3D
    surfs - This is a three element list. Each element can either be
        altlist - A list of the altitudes that RISR parameter slices will be taken at
        xyvecs- A list of x and y numpy arrays that have the x and y coordinates that the data will be interpolated over. ie, xyvecs=[np.linspace(-100.0,500.0),np.linspace(0.0,600.0)]
    vbounds = a list of bounds for the geodata objec's parameters. ie, vbounds=[500,2000]
    title - A string that holds for the overall image
    ax - A handle for an axis that this will be plotted on.

    Returns a mayavi image with a surface
    """

    assert geodata.coordnames.lower() == 'cartesian'

    datalocs = geodata.dataloc

    xvec = sp.unique(datalocs[:, 0])
    yvec = sp.unique(datalocs[:, 1])
    zvec = sp.unique(datalocs[:, 2])

    assert len(xvec)*len(yvec)*len(zvec) == datalocs.shape[0]

    #determine if the ordering is fortran or c style ordering
    diffcoord = sp.diff(datalocs, axis=0)

    if diffcoord[0, 1] != 0.0:
        ar_ord = 'f'
    elif diffcoord[0, 2] != 0.0:
        ar_ord = 'c'
    elif diffcoord[0, 0] != 0.0:
        if len(np.where(diffcoord[:, 1])[0]) == 0:
            ar_ord = 'f'
        elif len(np.where(diffcoord[:, 2])[0]) == 0:
            ar_ord = 'c'

    matshape = (len(yvec), len(xvec), len(zvec))
    # reshape the arrays into a matricies for plotting
    x, y, z = [sp.reshape(datalocs[:, idim], matshape, order=ar_ord) for idim in range(3)]

    if gkey is None:
        gkey = geodata.datanames()[0]
    porig = geodata.data[gkey][:, time]

    if fig is None:
        fig = plt.figure()

    if ax is None:
        ax = fig.gca(projection='3d')
    #determine if list of slices or surfaces are given

    islists = isinstance(surfs[0], list)
    if isinstance(surfs[0], np.ndarray):
        onedim = surfs[0].ndim == 1
    #get slices for each dimension out
    surflist = []
    if islists or onedim:
        p = np.reshape(porig, matshape, order=ar_ord)
        xslices = surfs[0]
        for isur in xslices:
            indx = sp.argmin(sp.absolute(isur-xvec))
            xtmp = x[:, indx]
            ytmp = y[:, indx]
            ztmp = z[:, indx]
            ptmp = p[:, indx]
            cmapobj = cm.ScalarMappable(cmap=cmap)
            cmapobj.set_array(ptmp)
            cmapobj.set_clim(vbounds)
            rgba = cmapobj.to_rgba(ptmp)
            # make NaNs transparent
            rgba[np.isnan(ptmp), -1] = 0

            surf_h = ax.plot_surface(xtmp, ytmp, ztmp, rstride=1, cstride=1,
                                     facecolors=rgba, linewidth=0,
                                     antialiased=False, shade=False)
            surflist.append(surf_h)
        yslices = surfs[1]
        for isur in yslices:
            indx = sp.argmin(sp.absolute(isur-yvec))
            xtmp = x[indx]
            ytmp = y[indx]
            ztmp = z[indx]
            ptmp = p[indx]
            cmapobj = cm.ScalarMappable(cmap=cmap)
            cmapobj.set_array(ptmp)
            cmapobj.set_clim(vbounds)
            rgba = cmapobj.to_rgba(ptmp)
            # make NaNs transparent
            rgba[np.isnan(ptmp), -1] = 0

            surf_h = ax.plot_surface(xtmp, ytmp, ztmp, rstride=1, cstride=1,
                                     facecolors=rgba, linewidth=0,
                                     antialiased=False, shade=False)
            surflist.append(surf_h)
        zslices = surfs[2]
        for isur in zslices:
            indx = sp.argmin(sp.absolute(isur-zvec))
            xtmp = x[:, :, indx]
            ytmp = y[:, :, indx]
            ztmp = z[:, :, indx]
            ptmp = p[:, :, indx]
            cmapobj = cm.ScalarMappable(cmap=cmap)
            cmapobj.set_array(ptmp)
            cmapobj.set_clim(vbounds)
            rgba = cmapobj.to_rgba(ptmp)
            # make NaNs transparent
            rgba[np.isnan(ptmp), -1] = 0

            surf_h = ax.plot_surface(xtmp, ytmp, ztmp, rstride=1, cstride=1,
                                     facecolors=rgba, linewidth=0,
                                     antialiased=False, shade=False)
            surflist.append(surf_h)
    else:
        # For a general surface.
        xtmp, ytmp, ztmp = surfs[:]
        gooddata = ~np.isnan(porig)
        curparam = porig[gooddata]
        curlocs = datalocs[gooddata]
        new_coords = np.column_stack((xtmp.flatten(), ytmp.flatten(), ztmp.flatten()))
        ptmp = spinterp.griddata(curlocs, curparam, new_coords, method, fill_value)
        cmapobj = cm.ScalarMappable(cmap=cmap)
        cmapobj.set_array(ptmp)
        cmapobj.set_clim(vbounds)
        rgba = cmapobj.to_rgba(ptmp)
        # make NaNs transparent
        rgba[np.isnan(ptmp), -1] = 0

        surf_h = ax.plot_surface(xtmp, ytmp, ztmp, rstride=1, cstride=1,
                                 facecolors=rgba, linewidth=0,
                                 antialiased=False, shade=False)
        surflist.append(surf_h)
    ax.set_title(titlestr)
    ax.set_xlabel('x in km')
    ax.set_ylabel('y in km')
    ax.set_zlabel('z in km')

    if view is not None:
        # order of elevation is changed between matplotlib and mayavi
        ax.view_init(view[1],view[0])
    if colorbar:
        if units == '':
            titlestr = gkey
        else:
            titlestr = gkey + ' in ' + units
        cbar = plt.colorbar(cmapobj, ax=ax, orientation='vertical')

        return surflist, cbar
    else:
        return surflist
Example #57
0
def slice2DGD(geod, axstr, slicenum, vbounds=None, time=0, gkey=None, cmap=defmap,
              fig=None, ax=None, title='', cbar=True, m=None):
    """
    This function create 2-D slice image given either a surface or list of coordinates to slice through
    Inputs:
    geodata - A geodata object that will be plotted.
    axstr - A string that specifies the plane that will be ploted.
    slicenum - The index location of that slice in the axis if the data were in a 3-D array.
    vbounds = a list of bounds for the geodata objec's parameters. ie, vbounds=[500,2000]
    time - The index of for the location in time that will be plotted.
    gkey - The name of the data that will be plotted.
    cmap - The color map to be used.
    fig - The figure handle that will be used.
    title - A string that holds for the overall image
    ax - A handle for an axis that this will be plotted on.
    cbar - A bool for creating the color bar, default =True.
    m - A handle for a map object if plotting over one.
    Outputs:
        ploth - The handle for the ploted image.
        cbar - The color bar handle for the image.
    """
    #xyzvecs is the area that the data covers.
    poscoords = ['cartesian','wgs84','enu','ecef']
    assert geod.coordnames.lower() in poscoords

    if geod.coordnames.lower() in ['cartesian','enu','ecef']:
        axdict = {'x':0,'y':1,'z':2}
        veckeys = ['x','y','z']
    elif geod.coordnames.lower() == 'wgs84':
        axdict = {'lat':0,'long':1,'alt':2}# shows which row is this coordinate
        veckeys = ['long','lat','alt']# shows which is the x, y and z axes for plotting

    if type(axstr)==str:
        axis=axstr
    else:
        axis= veckeys[axstr]
    veckeys.remove(axis.lower())
    veckeys.append(axis.lower())
    datacoords = geod.dataloc
    xyzvecs = {l:sp.unique(datacoords[:,axdict[l]]) for l in veckeys}

    #make matrices
    M1,M2 = sp.meshgrid(xyzvecs[veckeys[0]],xyzvecs[veckeys[1]])
    slicevec = sp.unique(datacoords[:,axdict[axis]])
    min_idx = sp.argmin(sp.absolute(slicevec-slicenum))
    slicenum=slicevec[min_idx]
    rec_coords = {axdict[veckeys[0]]:M1.flatten(),axdict[veckeys[1]]:M2.flatten(),
                  axdict[axis]:slicenum*sp.ones(M2.size)}


    new_coords = sp.zeros((M1.size,3))
    #make coordinates
    for ckey in rec_coords.keys():
        new_coords[:,ckey] = rec_coords[ckey]
    #determine the data name
    if gkey is None:
        gkey = geod.data.keys()[0]

    # get the data location, first check if the data can be just reshaped then do a
    # search

    sliceindx = slicenum==datacoords[:,axdict[axis]]

    datacoordred = datacoords[sliceindx]
    rstypes = ['C','F','A']
    nfounds = True
    M1dlfl = datacoordred[:,axdict[veckeys[0]]]
    M2dlfl = datacoordred[:,axdict[veckeys[1]]]
    for ir in rstypes:
        M1dl = sp.reshape(M1dlfl,M1.shape,order =ir)
        M2dl = sp.reshape(M2dlfl,M1.shape,order =ir)
        if sp.logical_and(sp.allclose(M1dl,M1),sp.allclose(M2dl,M2)):
            nfounds=False
            break
    if nfounds:
        dataout = geod.datareducelocation(new_coords,geod.coordnames,gkey)[:,time]
        dataout = sp.reshape(dataout,M1.shape)
    else:
        dataout = sp.reshape(geod.data[gkey][sliceindx,time],M1.shape,order=ir)

    title = insertinfo(title,gkey,geod.times[time,0],geod.times[time,1])


    if (ax is None) and (fig is None):
        fig = plt.figure(facecolor='white')
        ax = fig.gca()
    elif ax is None:
        ax = fig.gca()
    if m is None:
        ploth = ax.pcolor(M1,M2,dataout,vmin=vbounds[0], vmax=vbounds[1],cmap = cmap,
                          linewidth=0,rasterized=True)
        ploth.set_edgecolor('face')
        ax.axis([xyzvecs[veckeys[0]].min(), xyzvecs[veckeys[0]].max(),
                 xyzvecs[veckeys[1]].min(), xyzvecs[veckeys[1]].max()])
        if cbar:
            cbar2 = plt.colorbar(ploth, ax=ax, format='%.0e')
        else:
            cbar2 = None
        ax.set_title(title)
        ax.set_xlabel(veckeys[0])
        ax.set_ylabel(veckeys[1])
    else:
        N1,N2 = m(M1,M2)
        ploth = m.pcolor(N1,N2,dataout,vmin=vbounds[0], vmax=vbounds[1],cmap = cmap,
                         alpha=.4,linewidth=0,rasterized=True)

        if cbar:
            cbar2 = m.colorbar(ploth, format='%.0e')
        else:
            cbar2 = None


    return(ploth,cbar2)
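
# A standalone sketch (plain NumPy) of the reshape-order probe used above: try
# 'C' and 'F' until the flattened coordinates reproduce the meshgrid, which
# reveals how the stored data vector maps onto the 2-D grid.
import numpy as np

xvec = np.array([0., 1., 2.])
yvec = np.array([0., 10.])
M1, M2 = np.meshgrid(xvec, yvec)                  # each 2 x 3

# simulate data whose coordinates were stored Fortran-style
M1flat, M2flat = M1.flatten(order='F'), M2.flatten(order='F')

for order in ['C', 'F']:
    if (np.allclose(np.reshape(M1flat, M1.shape, order=order), M1) and
            np.allclose(np.reshape(M2flat, M2.shape, order=order), M2)):
        break
print(order)                                      # prints 'F'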
Example #58
0
def scatterGD(geod,axstr,slicenum,vbounds=None,time = 0,gkey = None,cmap=defmap,fig=None,
              ax=None,title='',cbar=True,err=.1,m=None):
    """ This will make a scatter plot given a GeoData object."""
    poscoords = ['cartesian','wgs84','enu','ecef']
    assert geod.coordnames.lower() in poscoords

    if geod.coordnames.lower() in ['cartesian','enu','ecef']:
        axdict = {'x':0,'y':1,'z':2}
        veckeys = ['x','y','z']
    elif geod.coordnames.lower() == 'wgs84':
        axdict = {'lat':0,'long':1,'alt':2}# shows which row is this coordinate
        veckeys = ['long','lat','alt']# shows which is the x, y and z axes for plotting
    if type(axstr)==str:
        axis=axstr
    else:
        axis= veckeys[axstr]

    #determine the data name
    if gkey is None:
        gkey = geod.data.keys()[0]
    geod=geod.timeslice(time)
    veckeys.remove(axis.lower())
    veckeys.append(axis.lower())
    datacoords = geod.dataloc
    xyzvecs = {l:sp.unique(datacoords[:,axdict[l]]) for l in veckeys}
    xyzvecsall = {l:datacoords[:,axdict[l]] for l in veckeys}
    if geod.issatellite():

        zdata = xyzvecsall[veckeys[2]]
        indxnum = np.abs(zdata-slicenum)<err
        xdata =xyzvecsall[veckeys[0]][indxnum]
        ydata =xyzvecsall[veckeys[1]][indxnum]
        dataout = geod.data[gkey][indxnum]
        title = insertinfo(title,gkey,geod.times[:,0].min(),geod.times[:,1].max())
    else:
        #make matrices
        xvec = xyzvecs[veckeys[0]]
        yvec = xyzvecs[veckeys[1]]
        M1,M2 = sp.meshgrid(xvec,yvec)
        slicevec = sp.unique(datacoords[:,axdict[axis]])
        min_idx = sp.argmin(sp.absolute(slicevec-slicenum))
        slicenum=slicevec[min_idx]
        rec_coords = {axdict[veckeys[0]]:M1.flatten(),axdict[veckeys[1]]:M2.flatten(),
                      axdict[axis]:slicenum*sp.ones(M2.size)}
        new_coords = sp.zeros((M1.size,3))
        xdata = M1.flatten()
        ydata= M2.flatten()

        #make coordinates
        for ckey in rec_coords.keys():
            new_coords[:,ckey] = rec_coords[ckey]


        # get the data location, first check if the data can be just reshaped then do a
        # search

        sliceindx = slicenum==datacoords[:,axdict[axis]]

        datacoordred = datacoords[sliceindx]
        rstypes = ['C','F','A']
        nfounds = True
        M1dlfl = datacoordred[:,axdict[veckeys[0]]]
        M2dlfl = datacoordred[:,axdict[veckeys[1]]]
        for ir in rstypes:
            M1dl = sp.reshape(M1dlfl,M1.shape,order =ir)
            M2dl = sp.reshape(M2dlfl,M1.shape,order =ir)
            if sp.logical_and(sp.allclose(M1dl,M1),sp.allclose(M2dl,M2)):
                nfounds=False
                break
        if nfounds:
            dataout = geod.datareducelocation(new_coords,geod.coordnames,gkey)[:,time]
            dataout = sp.reshape(dataout,M1.shape)
        else:
            dataout = sp.reshape(geod.data[gkey][sliceindx,time],M1.shape,order=ir)

        title = insertinfo(title,gkey,geod.times[time,0],geod.times[time,1])

    if (ax is None) and (fig is None):
        fig = plt.figure(facecolor='white')
        ax = fig.gca()
    elif ax is None:
        ax = fig.gca()
    if m is None:
        ploth = ax.scatter(xdata,ydata,c=dataout,vmin=vbounds[0], vmax=vbounds[1],cmap = cmap)
        ax.axis([xyzvecs[veckeys[0]].min(), xyzvecs[veckeys[0]].max(),
                 xyzvecs[veckeys[1]].min(), xyzvecs[veckeys[1]].max()])
        if cbar:
            cbar2 = plt.colorbar(ploth, ax=ax, format='%.0e')
        else:
            cbar2 = None
        ax.set_title(title)
        ax.set_xlabel(veckeys[0])
        ax.set_ylabel(veckeys[1])
    else:
        Xdata,Ydata = m(xdata,ydata)
        ploth = m.scatter(Xdata,Ydata,c=dataout,vmin=vbounds[0], vmax=vbounds[1],cmap = cmap)

        if cbar:
            cbar2 = m.colorbar(ploth)
        else:
            cbar2 = None

    return(ploth,cbar2)
Example #59
0
def glmnetPredict(fit,
                  newx=scipy.empty([0]),
                  s=scipy.empty([0]),
                  ptype='link',
                  exact=False,
                  offset=scipy.empty([0])):
    
    typebase = ['link', 'response', 'coefficients', 'nonzero', 'class']
    indxtf = [x.startswith(ptype.lower()) for x in typebase]
    indl = [i for i, tf in enumerate(indxtf) if tf]
    ptype = typebase[indl[0]]
    
    if newx.shape[0] == 0 and ptype != 'coefficients' and ptype != 'nonzero':
        raise ValueError("You need to supply a value for 'newx'")
    
    # python 1D arrays are not the same as matlab 1xn arrays
    # check for this. newx = x[0:1, :] is a python 2D array and would work; 
    # but newx = x[0, :] is a python 1D array and should not be passed into 
    # glmnetPredict    
    if len(newx.shape) == 1 and newx.shape[0] > 0:
        raise ValueError('newx must be a 2D (not a 1D) python array')
   
    if exact == True and len(s) > 0:
        # It is very messy to go back into the caller namespace
        # and call glmnet again. The user should really do this at their end
        # by calling glmnet again using the correct array of lambda values that
        # includes the lambda for which prediction is sought
        raise NotImplementedError('exact = True option is not implemented in python')

    # we convert newx to full here since sparse and full operations do not seem to 
    # be overloaded completely in scipy. 
    if scipy.sparse.issparse(newx):
        newx = newx.todense()
    
    # elnet
    if fit['class'] in ['elnet', 'fishnet', 'lognet']:
        if fit['class'] == 'lognet':
            a0 = fit['a0']
        else:    
            a0 = scipy.transpose(fit['a0'])
        
        a0 = scipy.reshape(a0, [1, a0.size])   # convert to 1 x N for appending
        nbeta = scipy.row_stack( (a0, fit['beta']) )        
        if scipy.size(s) > 0:
            lambdau = fit['lambdau']
            lamlist = lambda_interp(lambdau, s)
            nbeta = nbeta[:, lamlist['left']]*scipy.tile(scipy.transpose(lamlist['frac']), [nbeta.shape[0], 1]) \
            + nbeta[:, lamlist['right']]*( 1 - scipy.tile(scipy.transpose(lamlist['frac']), [nbeta.shape[0], 1]))
            
        if ptype == 'coefficients':
            result = nbeta
            return result
            
        if ptype == 'nonzero':
            result = nonzeroCoef(nbeta[1:nbeta.shape[0], :], True)
            return result
        # use scipy.sparse.hstack instead of column_stack for sparse matrices        
        result = scipy.dot(scipy.column_stack((scipy.ones([newx.shape[0], 1]), newx)), nbeta)
        if fit['offset']:
            if len(offset) == 0:
                raise ValueError('No offset provided for prediction, yet used in fit of glmnet')
            if offset.shape[1] == 2:
                offset = offset[:, 1]
                
            result = result + scipy.tile(offset, [1, result.shape[1]])    

    # fishnet                
    if fit['class'] == 'fishnet' and ptype == 'response':
        result = scipy.exp(result)

    # lognet
    if fit['class'] == 'lognet':
        if ptype == 'response':
            pp = scipy.exp(-result)
            result = 1/(1 + pp)
        elif ptype == 'class':
            result = (result > 0)*1 + (result <= 0)*0
            result = fit['label'][result]

    # multnet / mrelnet
    if fit['class'] == 'mrelnet' or fit['class'] == 'multnet':
        if fit['class'] == 'mrelnet':
            if ptype == 'response':
                ptype = 'link'
            fit['grouped'] = True
        
        a0 = fit['a0']
        nbeta = fit['beta'].copy()
        nclass = a0.shape[0]
        nlambda = s.size
        
        if len(s) > 0:
            lambdau = fit['lambdau']
            lamlist = lambda_interp(lambdau, s)
            for i in range(nclass):
                kbeta = scipy.row_stack( (a0[i, :], nbeta[i]) )
                kbeta = kbeta[:, lamlist['left']]*scipy.tile(scipy.transpose(lamlist['frac']), [kbeta.shape[0], 1]) \
                        + kbeta[:, lamlist['right']]*( 1 - scipy.tile(scipy.transpose(lamlist['frac']), [kbeta.shape[0], 1]))
                nbeta[i] = kbeta
        else:
            for i in range(nclass):
                nbeta[i] = scipy.row_stack( (a0[i, :], nbeta[i]) )
            nlambda = len(fit['lambdau'])    

        if ptype == 'coefficients':
            result = nbeta
            return result
            
        if ptype == 'nonzero':
            if fit['grouped']:
                result = list()
                tn = nbeta[0].shape[0]
                result.append(nonzeroCoef(nbeta[0][1:tn, :], True))
            else:
                result = list()
                for i in range(nclass):
                    tn = nbeta[i].shape[0]
                    result.append(nonzeroCoef(nbeta[i][1:tn, :], True))
            return result
            
        npred = newx.shape[0]
        dp = scipy.zeros([nclass, nlambda, npred], dtype=scipy.float64)
        for i in range(nclass):
            qq = scipy.column_stack((scipy.ones([newx.shape[0], 1]), newx))
            fitk = scipy.dot(qq, nbeta[i])
            dp[i, :, :] = dp[i, :, :] + scipy.reshape(scipy.transpose(fitk), [1, nlambda, npred])

        if fit['offset']:
            if len(offset) == 0:
                raise ValueError('No offset provided for prediction, yet used in fit of glmnet')
            if offset.shape[1] != nclass:
                raise ValueError('Offset should be dimension %d x %d' % (npred, nclass))
            toff = scipy.transpose(offset)
            for i in range(nlambda):
                dp[:, i, :] = dp[:, i, :] + toff
                
        if ptype == 'response':
            pp = scipy.exp(dp)
            psum = scipy.sum(pp, axis=0, keepdims=True)
            result = scipy.transpose(pp/scipy.tile(psum, [nclass, 1, 1]), [2, 0, 1])
        if ptype == 'link':
            result = scipy.transpose(dp, [2, 0, 1])
        if ptype == 'class':
            dp = scipy.transpose(dp, [2, 0, 1])
            result = list()
            for i in range(dp.shape[2]):
                t = softmax(dp[:, :, i])
                result = scipy.append(result, fit['label'][t['pclass']])

    # coxnet
    if fit['class'] == 'coxnet':
        nbeta = fit['beta']        
        if len(s) > 0:
            lambdau = fit['lambdau']
            lamlist = lambda_interp(lambdau, s)
            nbeta = nbeta[:, lamlist['left']]*scipy.tile(scipy.transpose(lamlist['frac']), [nbeta.shape[0], 1]) \
            + nbeta[:, lamlist['right']]*( 1 - scipy.tile(scipy.transpose(lamlist['frac']), [nbeta.shape[0], 1]))
            
        if ptype == 'coefficients':
            result = nbeta
            return result
            
        if ptype == 'nonzero':
            result = nonzeroCoef(nbeta, True)
            return result
        
        result = scipy.dot(newx, nbeta)
        
        if fit['offset']:
            if len(offset) == 0:
                raise ValueError('No offset provided for prediction, yet used in fit of glmnet')                              

            result = result + scipy.tile(offset, [1, result.shape[1]])    
        
        if ptype == 'response':
            result = scipy.exp(result)

    return result
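
# A hypothetical call pattern (fit, x_test and the lambda values below are
# placeholders; fit is assumed to come from the accompanying glmnet() routine).
# Note that newx must be 2-D, per the shape check at the top of the function:
#
#   preds = glmnetPredict(fit,
#                         newx=x_test[0:5, :],          # a 2-D slice, not x_test[0]
#                         s=scipy.array([0.01, 0.05]),  # lambda values to predict at
#                         ptype='response')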
Example #60
0
xmesh = []
ymesh = []
zmesh = []
icount = 0
cov_idx = npy.zeros(
    (nvars, nvars),
    dtype=int)  # 2D array to keep track of which index in xmesh etc. the
                # plots corresponding to vars i, j belong to
for j in range(istart + 1, istart + nvars):
    for i in range(istart, j):
        if (debug > 0):
            print "Computing 2D marginal distribution between variables:", i, ",", j, ":", vnames[
                i], " & ", vnames[j]
        x, y = mgrid[d0[nskip:, i].min():d0[nskip:, i].max():kde_idx,
                     d0[nskip:, j].min():d0[nskip:, j].max():kde_idx]
        z = reshape(kern_i_j[icount](c_[x.ravel(), y.ravel()].T).T, x.T.shape)
        xmesh.append(x)
        ymesh.append(y)
        zmesh.append(z)
        cov_idx[i, j] = icount
        icount = icount + 1
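
# A self-contained illustration (NumPy and scipy.stats only) of the
# mgrid-plus-KDE evaluation pattern used in the loop above, mirroring its
# variable names, with the grid resolution hardcoded to 50j in place of
# kde_idx:
import numpy as npy
from scipy import stats

rng = npy.random.RandomState(4)
samples = rng.randn(2, 500)                      # two variables, 500 draws
kern = stats.gaussian_kde(samples)
x, y = npy.mgrid[samples[0].min():samples[0].max():50j,
                 samples[1].min():samples[1].max():50j]
z = npy.reshape(kern(npy.c_[x.ravel(), y.ravel()].T).T, x.T.shape)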

# Section 4
# evaluate 1D pdfs
print "Evaluating 1D marginal pdfs with KDE"
xlin = []
pdflin = []
for i in range(istart, istart + nvars):
    xlin.append(npy.linspace(d0[nskip:, i].min(), d0[nskip:, i].max(), np_kde))
    kernlin = stats.kde.gaussian_kde(d0[nskip::nthin, i])
    pdflin.append(kernlin(xlin[i - istart]))