Example #1
def MakePulseDataRepLPC(pulse, spec, N, rep1, numtype=sp.complex128):
    """ This will make data by assuming the data is an autoregressive process.
        Inputs
            spec - The properly weighted spectrum.
            N - The size of the ar process used to model the filter.
            pulse - The pulse shape.
            rep1 - The number of repeats of the process.
        Outputs
            outdata - A numpy array with shape (rep1, len(pulse)). """

    lp = len(pulse)
    r1 = scfft.ifft(scfft.ifftshift(spec))
    rp1 = r1[:N]
    rp2 = r1[1:N + 1]
    # Use Levinson recursion to find the coefs for the data
    xr1 = sp.linalg.solve_toeplitz(rp1, rp2)
    lpc = sp.r_[sp.ones(1), -xr1]
    # The Gain  term.
    G = sp.sqrt(sp.sum(sp.conjugate(r1[:N + 1]) * lpc))
    Gvec = sp.r_[G, sp.zeros(N)]
    Npnt = (N + 1) * 3 + lp
    # Create the noise vector and normalize
    xin = sp.random.randn(rep1, Npnt) + 1j * sp.random.randn(rep1, Npnt)
    xinsum = sp.tile(
        sp.sqrt(sp.sum(xin.real**2 + xin.imag**2, axis=1))[:, sp.newaxis],
        (1, Npnt))
    xin = xin / xinsum / sp.sqrt(2.)
    outdata = sp.signal.lfilter(Gvec, lpc, xin, axis=1)
    outpulse = sp.tile(pulse[sp.newaxis], (rep1, 1))
    outdata = outpulse * outdata[:, N:N + lp]
    return outdata
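
A minimal usage sketch for the example above (not part of the original source): the spectrum is a made-up Gaussian line shape, and the imports mirror the aliases the snippet assumes (scipy as sp, scipy.fftpack as scfft).

import scipy as sp
import scipy.fftpack as scfft
import scipy.linalg
import scipy.signal

nfreq = 128
f = sp.arange(-nfreq // 2, nfreq // 2)
spec = sp.exp(-f**2 / (2. * 10.**2))     # assumed Gaussian spectrum
pulse = sp.ones(14)                      # assumed boxcar pulse
data = MakePulseDataRepLPC(pulse, spec, N=10, rep1=100)
print(data.shape)                        # (100, 14): one row per repeat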
Example #2
def sinc_interp1d(x, s, r):
    """Interpolates `x`, sampled at times `s`
    Output `y` is sampled at times `r`

    inspired by Matlab:
    http://phaseportrait.blogspot.com/2008/06/sinc-interpolation-in-matlab.html

    :param ndarray x: input data time series
    :param ndarray s: input sampling time series (regular sample interval)
    :param ndarray r: output sampling time series
    :return ndarray: output data time series (regular sample interval)
    """

    # init
    s = sp.asarray(s)
    r = sp.asarray(r)
    x = sp.asarray(x)
    if x.ndim == 1:
        x = sp.atleast_2d(x)
    else:
        if x.shape[0] == len(s):
            x = x.T
        else:
            if x.shape[1] != s.shape[0]:
                raise ValueError('x and s must have the same temporal extent')
    if sp.allclose(s, r):
        return x.T
    T = s[1] - s[0]

    # resample
    sincM = sp.tile(r, (len(s), 1)) - sp.tile(s[:, sp.newaxis], (1, len(r)))
    return sp.vstack([sp.dot(xx, sp.sinc(sincM / T)) for xx in x]).T
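
A quick hedged example of the interpolator above (assumes scipy imported as sp, as in the snippet). Note that sp.allclose(s, r) requires s and r to have broadcastable shapes, so r here keeps the same length as s:

import scipy as sp

s = sp.arange(0., 10., 1.)     # input sample times
x = sp.sin(s)
r = s + 0.5                    # shifted output sample times
y = sinc_interp1d(x, s, r)
print(y.shape)                 # (10, 1): one column per input channel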
Example #3
def massmatrix_rowcols(complex, k):
    """
    Compute the row and column arrays in the COO
    format of the Whitney form mass matrix
    """
    simplices = complex[-1].simplices
    num_simplices = simplices.shape[0]
    p = complex.complex_dimension()

    if k == p:
        #top dimension
        rows = arange(num_simplices, dtype=simplices.dtype)
        cols = arange(num_simplices, dtype=simplices.dtype)
        return rows, cols

    k_faces = [tuple(x) for x in combinations(range(p + 1), k + 1)]

    faces_per_simplex = len(k_faces)
    num_faces = num_simplices * faces_per_simplex
    faces = empty((num_faces, k + 1), dtype=simplices.dtype)

    for n, face in enumerate(k_faces):
        for m, i in enumerate(face):
            faces[n::faces_per_simplex, m] = simplices[:, i]

    #faces.sort() #we can't assume that the p-simplices are sorted

    indices = simplex_array_searchsorted(complex[k].simplices, faces)

    rows = tile(indices.reshape((-1, 1)), (faces_per_simplex, )).flatten()
    cols = tile(indices.reshape((-1, faces_per_simplex)),
                (faces_per_simplex, )).flatten()

    return rows, cols
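
The tile calls at the end are the core trick: they pair every face of a simplex with every other face of the same simplex. A standalone sketch with made-up face indices (two simplices, three faces each):

from numpy import array, tile

indices = array([0, 1, 2, 2, 3, 4])    # assumed face indices of 2 simplices
faces_per_simplex = 3
rows = tile(indices.reshape((-1, 1)), (faces_per_simplex,)).flatten()
cols = tile(indices.reshape((-1, faces_per_simplex)),
            (faces_per_simplex,)).flatten()
print(rows)    # [0 0 0 1 1 1 2 2 2 2 2 2 3 3 3 4 4 4]
print(cols)    # [0 1 2 0 1 2 0 1 2 2 3 4 2 3 4 2 3 4]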
Example #4
def trueFeatureStats(T, R, fMap, discountFactor, stateProp=1, MAT_LIMIT=1e8):
    """ Gather the statistics needed for LSTD,
    assuming infinite data (true probabilities).
    Option: if stateProp < 1, then only a proportion of all
    states will be seen as starting state for transitions """
    dim = len(fMap)
    numStates = len(T)
    statMatrix = zeros((dim, dim))
    statResidual = zeros(dim)
    ss = range(numStates)
    repVersion = False
    
    if stateProp < 1:
        ss = random.sample(ss, int(numStates * stateProp))
    elif dim * numStates**2 < MAT_LIMIT:
        repVersion = True
    
    # two variants, depending on how large we can afford our matrices to become.        
    if repVersion:    
        tmp1 = tile(fMap, (numStates,1,1))
        tmp2 = transpose(tmp1, (2,1,0))
        tmp3 = tmp2 - discountFactor * tmp1            
        tmp4 = tile(T, (dim,1,1))
        tmp4 *= transpose(tmp1, (1,2,0))
        statMatrix = tensordot(tmp3, tmp4, axes=[[0,2], [1,2]]).T
        statResidual = dot(R, dot(fMap, T).T)
    else:
        for sto in ss:
            tmp = fMap - discountFactor * repmat(fMap[:, sto], numStates, 1).T
            tmp2 = fMap * repmat(T[:, sto], dim, 1)
            statMatrix += dot(tmp2, tmp.T)             
            statResidual += R[sto] * dot(fMap, T[:, sto])
    return statMatrix, statResidual
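
A hypothetical usage sketch: a 3-state chain with 2 features. It assumes the flat numpy names used by trueFeatureStats (zeros, tile, transpose, tensordot, dot, repmat, random) are already imported in its module:

from numpy import array

T = array([[0.8, 0.1, 0.1],
           [0.1, 0.8, 0.1],
           [0.1, 0.1, 0.8]])     # transition probabilities
R = array([0., 0., 1.])          # reward per state
fMap = array([[1., 0., 0.],      # dim x numStates feature map
              [0., 1., 1.]])
A, b = trueFeatureStats(T, R, fMap, discountFactor=0.9)
# The LSTD weights would then come from solving A w = b.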
Example #5
def MakePulseDataRepLPC(pulse,spec,N,rep1,numtype = sp.complex128):
    """ This will make data by assuming the data is an autoregressive process.
        Inputs
            spec - The properly weighted spectrum.
            N - The size of the ar process used to model the filter.
            pulse - The pulse shape.
            rep1 - The number of repeats of the process.
        Outputs
            outdata - A numpy array with shape (rep1, len(pulse)). """

    lp = len(pulse)
    lenspec = len(spec)
    r1 = scfft.ifft(scfft.ifftshift(spec))
    rp1 = r1[:N]
    rp2 = r1[1:N+1]
    # Use Levinson recursion to find the coefs for the data
    xr1 = sp.linalg.solve_toeplitz(rp1, rp2)
    lpc = sp.r_[sp.ones(1), -xr1]
    # The Gain  term.
    G = sp.sqrt(sp.sum(sp.conjugate(r1[:N+1])*lpc))
    Gvec = sp.r_[G, sp.zeros(N)]
    Npnt = (N+1)*3+lp
    # Create the noise vector and normalize
    xin = sp.random.randn(rep1, Npnt)+1j*sp.random.randn(rep1, Npnt)
    xinsum = sp.tile(sp.sqrt(sp.mean(xin.real**2+xin.imag**2, axis=1))[:, sp.newaxis],(1, Npnt))
    xin = xin/xinsum
    outdata = sp.signal.lfilter(Gvec, lpc, xin, axis=1)
    outpulse = sp.tile(pulse[sp.newaxis], (rep1, 1))
    outdata = outpulse*outdata[:, 2*N:2*N+lp]
    return outdata
Example #6
File: qtl_old.py Project: jlmaccal/limix
def phenSpecificEffects(snps,pheno1,pheno2,K=None,covs=None,test='lrt'):
    """
    Univariate fixed effects interaction test for phenotype specific SNP effects

    Args:
        snps:   [N x S] SP.array of S SNPs for N individuals (test SNPs)
        pheno1: [N x 1] SP.array of 1 phenotype for N individuals
        pheno2: [N x 1] SP.array of 1 phenotype for N individuals
        K:      [N x N] SP.array of LMM-covariance/kinship coefficients (optional)
                        If not provided, then linear regression analysis is performed
        covs:   [N x D] SP.array of D covariates for N individuals
        test:    'lrt' for likelihood ratio test (default) or 'f' for F-test

    Returns:
        limix LMM object
    """
    N=snps.shape[0]
    if K is None:
        K=SP.eye(N)
    assert (pheno1.shape[1]==pheno2.shape[1]), "Only consider equal number of phenotype dimensions"
    if covs is None:
        covs = SP.ones((N,1))
    assert (pheno1.shape[1]==1 and pheno2.shape[1]==1 and pheno1.shape[0]==N and pheno2.shape[0]==N and K.shape[0]==N and K.shape[1]==N and covs.shape[0]==N), "shapes mismatch"
    Inter = SP.zeros((N*2,1))
    Inter[0:N,0]=1
    Inter0 = SP.ones((N*2,1))
    Yinter=SP.concatenate((pheno1,pheno2),0)
    Xinter = SP.tile(snps,(2,1))
    Covinter = SP.tile(covs,(2,1))
    lm = simple_interaction(snps=Xinter,pheno=Yinter,covs=Covinter,Inter=Inter,Inter0=Inter0,test=test)
    return lm
Example #7
def MakePulseDataRep(pulse_shape, filt_freq, delay=16,rep=1,numtype = sp.complex128):
    """ This function will create a repxLp numpy array, where rep is number of independent
        repeats and Lp is number of pulses, of noise shaped by the filter who's frequency
        response is passed as the parameter filt_freq. The pulse shape is delayed by the parameter
        delay into the data. The noise vector that will be multiplied by the filter's frequency
        response will be zero mean complex white Gaussian noise with a power of 1. The user
        then will need to multiply the filter by its size to get the desired power from using
        the function.
        Inputs:
            pulse_shape: A numpy array that holds the shape of the single pulse.
            filt_freq - a numpy array that holds the complex frequency response of the filter
            that will be used to shape the noise data.
            delay - The number of samples that the pulse will be delayed into the
            array of noise data to avoid any problems with filter overlap.
            rep - Number of indepent samples/pulses shaped by the filter.
            numtype - The type of numbers used for the output.
        Output
            data_out - A repxLp of data that has been shaped by the filter. Points along
            The first axis are independent of each other while samples along the second
            axis are colored using the filter and multiplied by the pulse shape.
    """
    npts = len(filt_freq)
    filt_tile = sp.tile(filt_freq[sp.newaxis,:],(rep,1))
    shaperep = sp.tile(pulse_shape[sp.newaxis,:],(rep,1))
    noisereal = sp.random.randn(rep,npts).astype(numtype)
    noiseimag = sp.random.randn(rep,npts).astype(numtype)
    noise_vec =(noisereal+1j*noiseimag)/sp.sqrt(2.0)
#    noise_vec = noisereal
    mult_freq = filt_tile.astype(numtype)*noise_vec
    data = scfft.ifft(mult_freq,axis=-1)
    data_out = shaperep*data[:,delay:(delay+len(pulse_shape))]
    return data_out
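
A short hedged example of the function above, shaping noise with a made-up low-pass response (assumes scipy as sp and scipy.fftpack as scfft, as in the snippet):

import scipy as sp
import scipy.fftpack as scfft

npts = 128
filt_freq = sp.exp(-sp.arange(npts) / 20.)     # assumed filter response
pulse_shape = sp.ones(32)
data_out = MakePulseDataRep(pulse_shape, filt_freq, delay=16, rep=50)
print(data_out.shape)                          # (50, 32)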
Example #8
def massmatrix_rowcols(complex,k):
    """
    Compute the row and column arrays in the COO
    format of the Whitney form mass matrix
    """
    simplices = complex[-1].simplices
    num_simplices = simplices.shape[0]
    p = complex.complex_dimension()
    
    if k == p:
        #top dimension
        rows = arange(num_simplices,dtype=simplices.dtype)
        cols = arange(num_simplices,dtype=simplices.dtype)
        return rows,cols
    
    k_faces = [tuple(x) for x in combinations(range(p+1),k+1)]

    faces_per_simplex = len(k_faces)
    num_faces = num_simplices*faces_per_simplex
    faces     = empty((num_faces,k+1),dtype=simplices.dtype)
   
    for n,face in enumerate(k_faces):
        for m,i in enumerate(face):
            faces[n::faces_per_simplex,m] = simplices[:,i]

    #faces.sort() #we can't assume that the p-simplices are sorted

    indices = simplex_array_searchsorted(complex[k].simplices,faces)

    rows = tile(indices.reshape((-1,1)),(faces_per_simplex,)).flatten()
    cols = tile(indices.reshape((-1,faces_per_simplex)),(faces_per_simplex,)).flatten()

    return rows,cols
Example #9
def rpeuc(x, m=1, t=1, e=0.1, normed=True):
	"""Returns the recurrence matrix based on Euclidean metric.
	"""
	x = x.squeeze()
	if normed: x = (x - x.mean()) / x.std()
	# embed x with dimension 'm' and delay 't'
	n = x.shape[0]
	step = (m-1)*t
	count = step+1
	y = sp.zeros((n-count+1,m))
	for i in range(n-count+1):
		tt = i+step
		y[i,:] = x[-(n-tt):-(n-tt)-step-1:-t]
	# get distance matrix
	n = y.shape[0]
	dist = sp.zeros((n,n))
	if m > 1:
		for i in range(n-1):
			dist[i+1:n,i] = sp.sqrt(sp.sum(sp.square(
				sp.tile(y[i,:], (n-i-1, 1)) - y[i+1:n,:]), axis=1))
	elif m == 1:
		for i in range(n-1):
			dist[i+1:n,i] = abs(sp.tile(y[i], (n-i-1,1)) -
				y[i+1:n]).squeeze()
	dist = dist + dist.T
	RP = sp.zeros((n,n), dtype=sp.int8)
	RP[(dist <= e) & (dist > 0)] = 1
	RP = RP + sp.eye(n, dtype=sp.int8)
	return RP
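
A hypothetical usage: recurrence plot of a sine wave (assumes scipy imported as sp, matching the snippet):

import scipy as sp

time = sp.linspace(0, 8 * sp.pi, 200)
RP = rpeuc(sp.sin(time), m=2, t=3, e=0.5)
print(RP.shape)    # square binary matrix; ones mark recurrent states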
Example #10
def sinc_interp1d(x, s, r):
    """Interpolates `x`, sampled at times `s`
    Output `y` is sampled at times `r`

    inspired by Matlab:
    http://phaseportrait.blogspot.com/2008/06/sinc-interpolation-in-matlab.html

    :param ndarray x: input data time series
    :param ndarray s: input sampling time series (regular sample interval)
    :param ndarray r: output sampling time series
    :return ndarray: output data time series (regular sample interval)
    """

    # init
    s = sp.asarray(s)
    r = sp.asarray(r)
    x = sp.asarray(x)
    if x.ndim == 1:
        x = sp.atleast_2d(x)
    else:
        if x.shape[0] == len(s):
            x = x.T
        else:
            if x.shape[1] != s.shape[0]:
                raise ValueError('x and s must have the same temporal extent')
    if sp.allclose(s, r):
        return x.T
    T = s[1] - s[0]

    # resample
    sincM = sp.tile(r, (len(s), 1)) - sp.tile(s[:, sp.newaxis], (1, len(r)))
    return sp.vstack([sp.dot(xx, sp.sinc(sincM / T)) for xx in x]).T
Example #11
    def _LMLgrad_covar(self, hyperparams):
        """
        evaluates the gradient of the log marginal likelihood with respect to the
        hyperparameters of the covariance function
        """
        try:
            KV = self.get_covariances(hyperparams)
        except LA.LinAlgError:
            LG.error('linalg exception in _LMLgrad_covar')
            return {
                'covar_r': SP.zeros(len(hyperparams['covar_r'])),
                'covar_c': SP.zeros(len(hyperparams['covar_c']))
            }
        except ValueError:
            LG.error('value error in _LMLgrad_covar')
            return {
                'covar_r': SP.zeros(len(hyperparams['covar_r'])),
                'covar_c': SP.zeros(len(hyperparams['covar_c']))
            }

        RV = {}
        Si = unravel(1. / KV['S'], self.n, self.t)

        if 'covar_r' in hyperparams:
            theta = SP.zeros(len(hyperparams['covar_r']))
            for i in range(len(theta)):
                Kgrad_r = self.covar_r.Kgrad_theta(hyperparams['covar_r'], i)
                d = (KV['U_r'] * SP.dot(Kgrad_r, KV['U_r'])).sum(0)
                LMLgrad_det = reduce(SP.dot, [d, Si, KV['S_c_tilde']])
                UdKU = reduce(SP.dot, [KV['U_r'].T, Kgrad_r, KV['U_r']])
                SYUdKU = SP.dot(
                    UdKU,
                    (KV['Ytilde'] * SP.tile(KV['S_c_tilde'][SP.newaxis, :],
                                            (self.n, 1))))
                LMLgrad_quad = -(KV['Ytilde'] * SYUdKU).sum()
                LMLgrad = 0.5 * (LMLgrad_det + LMLgrad_quad)
                theta[i] = LMLgrad
            RV['covar_r'] = theta

        if 'covar_c' in hyperparams:
            theta = SP.zeros(len(hyperparams['covar_c']))
            for i in range(len(theta)):
                Kgrad_c = self.covar_c.Kgrad_theta(hyperparams['covar_c'], i)
                S_c_tilde_grad = reduce(
                    SP.dot, [KV['UBinvB'], Kgrad_c, KV['UBinvB'].T])
                LMLgrad_det = reduce(
                    SP.dot,
                    [KV['S_r'], Si, SP.diag(S_c_tilde_grad)])
                SYUdKU = SP.dot(
                    (KV['Ytilde'] * SP.tile(KV['S_r'][:, SP.newaxis],
                                            (1, self.t))), S_c_tilde_grad.T)
                LMLgrad_quad = -SP.sum(KV['Ytilde'] * SYUdKU)
                LMLgrad = 0.5 * (LMLgrad_det + LMLgrad_quad)
                theta[i] = LMLgrad
            RV['covar_c'] = theta

        return RV
Example #12
def plot_results_interval(twosample_interval_object, xlabel='Time/hr', ylabel='expression level', title="", legend=False, *args, **kwargs):
        """
        Plot results of resampling of a (subclass of) 
        :py:class:`gptwosample.twosample.interval_smooth.GPTwoSampleInterval`.
        This method will predict some new data for plotting purposes.
        
        **Parameters:**
        
        twosample_interval_object: :py:class:`gptwosample.twosample.interval_smooth`
            The GPTwosample resample object, from which to take the results.
        """
        
        predicted_indicators = twosample_interval_object.get_predicted_indicators()
        model_dist,Xp = twosample_interval_object.get_predicted_model_distribution()
        
        IS = SP.tile(~predicted_indicators, twosample_interval_object._n_replicates_ind)
        IJ = SP.tile(predicted_indicators, twosample_interval_object._n_replicates_comm)

        # predict GPTwoSample object with indicators as interval_indices
        if(IS.any() and IJ.any()):
            twosample_interval_object._twosample_object.predict_model_likelihoods(\
                interval_indices={individual_id:IS, common_id:IJ}, messages=False)
            twosample_interval_object._twosample_object.predict_mean_variance(Xp,\
                interval_indices={individual_id:IS, common_id:IJ})
        else:
            twosample_interval_object._twosample_object.predict_model_likelihoods(messages=False)
            twosample_interval_object._twosample_object.predict_mean_variance(Xp)
        #now plot stuff
        ax1 = PL.axes([0.15, 0.1, 0.8, 0.7])

        plot_results(twosample_interval_object._twosample_object, 
                     alpha=model_dist, 
                     legend=legend,#interval_indices={individual_id:IS, common_id:IJ},
                     xlabel=xlabel,
                     ylabel=ylabel,
                     title="", *args, **kwargs)
        
        PL.suptitle(title,fontsize=20)
        
        PL.xlim([Xp.min(), Xp.max()])
        yticks = ax1.get_yticks()[0:-1]
        ax1.set_yticks(yticks)
        
        data = twosample_interval_object._twosample_object.get_data(common_id)
        Ymax = data[1].max()
        Ymin = data[1].min()
        
        DY = Ymax - Ymin
        PL.ylim([Ymin - 0.1 * DY, Ymax + 0.1 * DY])
        #2nd. plot prob. of diff
        ax2 = PL.axes([0.15, 0.8, 0.8, 0.10], sharex=ax1)
        PL.plot(Xp, model_dist, 'k-', linewidth=2)
        PL.ylabel('$P(z(t)=1)$')
#            PL.yticks([0.0,0.5,1.0])
        PL.yticks([0.5])           
        #horizontal bar
        PL.axhline(linewidth=0.5, color='#aaaaaa', y=0.5)
        PL.ylim([0, 1])
        PL.setp(ax2.get_xticklabels(), visible=False)
Example #13
    def _LMLgrad_covar(self,hyperparams,debugging=False):
        """
        evaluates the gradient of the log marginal likelihood with respect to the
        hyperparameters of the covariance function
        """
        try:
            KV = self.get_covariances(hyperparams,debugging=debugging)
        except LA.LinAlgError:
            LG.error('linalg exception in _LMLgrad_covar')
            return {'covar_r':SP.zeros(len(hyperparams['covar_r'])),'covar_c':SP.zeros(len(hyperparams['covar_c']))}
        except ValueError:
            LG.error('value error in _LMLgrad_covar')
            return {'covar_r':SP.zeros(len(hyperparams['covar_r'])),'covar_c':SP.zeros(len(hyperparams['covar_c']))}
 
        RV = {}
        Si = unravel(1./KV['S'],self.n,self.t)

        if 'covar_r' in hyperparams:
            theta = SP.zeros(len(hyperparams['covar_r']))
            for i in range(len(theta)):
                Kgrad_r = self.covar_r.Kgrad_theta(hyperparams['covar_r'],i)
                d=(KV['U_r']*SP.dot(Kgrad_r,KV['U_r'])).sum(0)
                LMLgrad_det = SP.dot(d,SP.dot(Si,KV['S_c']))
                UdKU = SP.dot(KV['U_r'].T,SP.dot(Kgrad_r,KV['U_r']))
                SYUdKU = SP.dot(UdKU,(KV['Ytilde']*SP.tile(KV['S_c'][SP.newaxis,:],(self.n,1))))
                LMLgrad_quad = - (KV['Ytilde']*SYUdKU).sum()
                LMLgrad = 0.5*(LMLgrad_det + LMLgrad_quad)
                theta[i] = LMLgrad

                if debugging:
                    Kd = SP.kron(KV['K_c'], Kgrad_r)
                    _LMLgrad = 0.5 * (KV['W']*Kd).sum()
                    assert SP.allclose(LMLgrad,_LMLgrad), 'ouch, gradient is wrong for covar_r'
                    
            RV['covar_r'] = theta

        if 'covar_c' in hyperparams:
            theta = SP.zeros(len(hyperparams['covar_c']))
            for i in range(len(theta)):
                Kgrad_c = self.covar_c.Kgrad_theta(hyperparams['covar_c'],i)

                d=(KV['U_c']*SP.dot(Kgrad_c,KV['U_c'])).sum(0)
                LMLgrad_det = SP.dot(KV['S_r'],SP.dot(Si,d))

                UdKU = SP.dot(KV['U_c'].T,SP.dot(Kgrad_c,KV['U_c']))
                SYUdKU = SP.dot((KV['Ytilde']*SP.tile(KV['S_r'][:,SP.newaxis],(1,self.t))),UdKU.T)
                LMLgrad_quad = -SP.sum(KV['Ytilde']*SYUdKU)
                LMLgrad = 0.5*(LMLgrad_det + LMLgrad_quad)
                theta[i] = LMLgrad
            
                if debugging:
                    Kd = SP.kron(Kgrad_c, KV['K_r'])
                    _LMLgrad = 0.5 * (KV['W']*Kd).sum()
                    assert SP.allclose(LMLgrad,_LMLgrad), 'ouch, gradient is wrong for covar_c'
                    
            RV['covar_c'] = theta

        return RV
Example #14
def regular_cube_innerproduct(rcc, k):
    """
    For a given regular_cube_complex, compute a matrix
    representing the k-form innerproduct.

    These elements are similar to Whitney forms,
    except using standard linear (bilinear,trilinear,..)
    elements for 0-forms.
    """

    N = rcc.complex_dimension()

    #standard cube is [0,0,..,0] [0,1,...,N]
    standard_cube = atleast_2d(array([0] * N + list(range(N)), dtype='i'))
    standard_k_faces = standard_cube
    for i in range(N, k, -1):
        standard_k_faces = cube_array_boundary(standard_k_faces, i)[0]

    k_faces_per_cube = standard_k_faces.shape[0]

    K = zeros((k_faces_per_cube, k_faces_per_cube))  #local stiffness matrix
    h = 1
    V = h**N  #cube volume
    scale = V * (1 / h)**2 * (1 / 3.0)**(N - k)
    for i, row_i in enumerate(standard_k_faces):
        for j, row_j in enumerate(standard_k_faces):
            if all(row_i[N:] == row_j[N:]):
                differences = (row_i[:N] != row_j[:N])
                differences[row_i[N:]] = 0
                K[i, j] = scale * (1.0 / 2.0)**sum(differences)
            else:
                K[i, j] = 0

    CA = rcc[-1].cube_array[:, :N]
    num_cubes = CA.shape[0]

    k_faces = tile(hstack((CA, zeros((CA.shape[0], k), dtype=CA.dtype))),
                   (1, k_faces_per_cube)).reshape((-1, N + k))
    k_faces += tile(standard_k_faces, (num_cubes, 1))

    k_face_array = rcc[k].cube_array

    face_indices = cube_array_search(k_face_array, k_faces)

    rows = face_indices.repeat(k_faces_per_cube)
    cols = face_indices.reshape(
        (-1, k_faces_per_cube)).repeat(k_faces_per_cube, axis=0).reshape(
            (-1, ))
    data = K.reshape((1, -1)).repeat(num_cubes, axis=0).reshape((-1, ))

    # temporary memory cost solution - eliminate zeros from COO representation
    nz_mask = data != 0.0
    rows = rows[nz_mask]
    cols = cols[nz_mask]
    data = data[nz_mask]

    shape = (len(k_face_array), len(k_face_array))
    return coo_matrix((data, (rows, cols)), shape).tocsr()
Example #15
    def _LMLgrad_covar(self,hyperparams,debugging=False):
        """
        evaluates the gradient of the log marginal likelihood with respect to the
        hyperparameters of the covariance function
        """
        try:
            KV = self.get_covariances(hyperparams,debugging=debugging)
        except LA.LinAlgError:
            LG.error('linalg exception in _LMLgrad_covar')
            return {'covar_r':SP.zeros(len(hyperparams['covar_r'])),'covar_c':SP.zeros(len(hyperparams['covar_c']))}
        except ValueError:
            LG.error('value error in _LMLgrad_covar')
            return {'covar_r':SP.zeros(len(hyperparams['covar_r'])),'covar_c':SP.zeros(len(hyperparams['covar_c']))}
 
        RV = {}
        Si = unravel(1./KV['S'],self.n,self.t)

        if 'covar_r' in hyperparams:
            theta = SP.zeros(len(hyperparams['covar_r']))
            for i in range(len(theta)):
                Kgrad_r = self.covar_r.Kgrad_theta(hyperparams['covar_r'],i)
                d=(KV['U_r']*SP.dot(Kgrad_r,KV['U_r'])).sum(0)
                LMLgrad_det = SP.dot(d,SP.dot(Si,KV['S_c']))
                UdKU = SP.dot(KV['U_r'].T,SP.dot(Kgrad_r,KV['U_r']))
                SYUdKU = SP.dot(UdKU,(KV['Ytilde']*SP.tile(KV['S_c'][SP.newaxis,:],(self.n,1))))
                LMLgrad_quad = - (KV['Ytilde']*SYUdKU).sum()
                LMLgrad = 0.5*(LMLgrad_det + LMLgrad_quad)
                theta[i] = LMLgrad

                if debugging:
                    Kd = SP.kron(KV['K_c'], Kgrad_r)
                    _LMLgrad = 0.5 * (KV['W']*Kd).sum()
                    assert SP.allclose(LMLgrad,_LMLgrad), 'ouch, gradient is wrong for covar_r'
                    
            RV['covar_r'] = theta

        if 'covar_c' in hyperparams:
            theta = SP.zeros(len(hyperparams['covar_c']))
            for i in range(len(theta)):
                Kgrad_c = self.covar_c.Kgrad_theta(hyperparams['covar_c'],i)

                d=(KV['U_c']*SP.dot(Kgrad_c,KV['U_c'])).sum(0)
                LMLgrad_det = SP.dot(KV['S_r'],SP.dot(Si,d))

                UdKU = SP.dot(KV['U_c'].T,SP.dot(Kgrad_c,KV['U_c']))
                SYUdKU = SP.dot((KV['Ytilde']*SP.tile(KV['S_r'][:,SP.newaxis],(1,self.t))),UdKU.T)
                LMLgrad_quad = -SP.sum(KV['Ytilde']*SYUdKU)
                LMLgrad = 0.5*(LMLgrad_det + LMLgrad_quad)
                theta[i] = LMLgrad
            
                if debugging:
                    Kd = SP.kron(Kgrad_c, KV['K_r'])
                    _LMLgrad = 0.5 * (KV['W']*Kd).sum()
                    assert SP.allclose(LMLgrad,_LMLgrad), 'ouch, gradient is wrong for covar_c'
                    
            RV['covar_c'] = theta

        return RV
Example #16
def regular_cube_innerproduct(rcc,k):      
    """
    For a given regular_cube_complex, compute a matrix
    representing the k-form innerproduct.

    These elements are similar to Whitney forms,
    except using standard linear (bilinear,trilinear,..)
    elements for 0-forms.
    """

    N = rcc.complex_dimension()

    #standard cube is [0,0,..,0] [0,1,...,N]   
    standard_cube  = atleast_2d(array([0]*N + list(range(N)),dtype='i'))
    standard_k_faces = standard_cube
    for i in range(N,k,-1):        
        standard_k_faces = cube_array_boundary(standard_k_faces,i)[0]

        
    k_faces_per_cube = standard_k_faces.shape[0]


    K = zeros((k_faces_per_cube,k_faces_per_cube)) #local stiffness matrix
    h = 1
    V = h**N #cube volume
    scale = V * (1/h)**2 * (1/3.0)**(N-k)
    for i,row_i in enumerate(standard_k_faces):
        for j,row_j in enumerate(standard_k_faces):
            if all(row_i[N:] == row_j[N:]):
                differences = (row_i[:N] != row_j[:N])
                differences[row_i[N:]] = 0                
                K[i,j] = scale * (1.0/2.0)**sum(differences)
            else:
                K[i,j] = 0
        

    CA = rcc[-1].cube_array[:,:N]
    num_cubes = CA.shape[0]

    k_faces  = tile(hstack((CA,zeros((CA.shape[0],k),dtype=CA.dtype))),(1,k_faces_per_cube)).reshape((-1,N+k))
    k_faces += tile(standard_k_faces,(num_cubes,1))
    
    k_face_array = rcc[k].cube_array

    face_indices = cube_array_search(k_face_array,k_faces)

    rows = face_indices.repeat(k_faces_per_cube)
    cols = face_indices.reshape((-1,k_faces_per_cube)).repeat(k_faces_per_cube,axis=0).reshape((-1,))
    data = K.reshape((1,-1)).repeat(num_cubes,axis=0).reshape((-1,))
    
    # temporary memory cost solution - eliminate zeros from COO representation
    nz_mask = data != 0.0
    rows = rows[nz_mask]
    cols = cols[nz_mask]
    data = data[nz_mask]

    shape = (len(k_face_array),len(k_face_array))
    return coo_matrix( (data,(rows,cols)), shape).tocsr()
Example #17
def array_coords(shape):
	y = shape[0]
	x = shape[1]
	out = scipy.empty((2,y,x))
	t = scipy.arange(y,dtype='f8')
	out[0] = scipy.tile(t,(x,1)).T
	t = scipy.arange(x,dtype='f8')
	out[1] = scipy.tile(t,(y,1))
	return out
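
A quick check of what this returns, with a made-up 2x3 shape (scipy import assumed):

import scipy

coords = array_coords((2, 3))
print(coords[0])    # row indices:    [[0. 0. 0.] [1. 1. 1.]]
print(coords[1])    # column indices: [[0. 1. 2.] [0. 1. 2.]]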
Example #18
 def f(self, x):
     N = self.xdim // 3
     coords = x.reshape((N, 3))
     distances = sqrt(
         scipy.sum((tile(coords,
                         (N, 1, 1)) - swapaxes(tile(coords,
                                                    (N, 1, 1)), 0, 1))**2,
                   axis=2)) + eye(N)
     return 2 * sum(ravel(distances**-12 - distances**-6))
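
The tile/swapaxes pairing above is a compact way to build an N x N distance matrix in one shot; a standalone sketch with made-up coordinates (old-style flat scipy imports assumed, as in the snippet):

import scipy
from scipy import tile, swapaxes, sqrt, eye

coords = scipy.random.rand(4, 3)     # 4 particles in 3-D
diff = tile(coords, (4, 1, 1)) - swapaxes(tile(coords, (4, 1, 1)), 0, 1)
distances = sqrt(scipy.sum(diff**2, axis=2)) + eye(4)
print(distances.shape)               # (4, 4); eye() keeps the diagonal nonzero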
Example #19
def nandot(x1, x2):
    if len(x1.shape) == 1 and len(x2.shape) == 2:
        x1T = SP.tile(x1, [x2.shape[1], 1]).transpose()
        return SP.nansum(SP.multiply(x1T, x2), axis=0)
    elif len(x2.shape) == 1 and len(x1.shape) == 2:
        x2T = SP.tile(x2, [x1.shape[0], 1])
        return SP.nansum(SP.multiply(x1, x2T), axis=1)
    elif len(x1.shape) == 1 and len(x2.shape) == 1:
        return SP.nansum(SP.multiply(x1, x2))
    return None
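
A small worked example of nandot (assumes scipy imported as SP, as above): a NaN-tolerant dot product via nansum.

import scipy as SP

x = SP.array([1., SP.nan, 2.])
M = SP.array([[1., 0.], [5., 5.], [0., 1.]])
print(nandot(x, M))    # the NaN entry is treated as 0 -> [1. 2.]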
Example #20
def plot_stoch_value():
    #Compute Solution==========================================================
    sigma = .5
    mu = 4 * sigma
    K = 7
    Gamma, eps = discretenorm.discretenorm(K, mu, sigma)

    N = 100
    W = sp.linspace(0, 1, N)
    V = sp.zeros((N, K))

    u = lambda c: sp.sqrt(c)
    beta = 0.99

    X, Y = sp.meshgrid(W, W)
    Wdiff = Y - X
    index = Wdiff < 0
    Wdiff[index] = 0

    util_grid = u(Wdiff)

    util3 = sp.tile(util_grid[:, :, sp.newaxis], (1, 1, K))
    eps_grid = eps[sp.newaxis, sp.newaxis, :]
    eps_util = eps_grid * util3

    Gamma_grid = Gamma[sp.newaxis, :]

    delta = 1
    Vprime = V
    z = 0
    while (delta > 10**-9):
        z = z + 1
        V = Vprime
        gamV = Gamma_grid * V
        Expval = sp.sum(gamV, 1)
        Exp_grid = sp.tile(Expval[sp.newaxis, :, sp.newaxis], (N, 1, K))
        arg = eps_util + beta * Exp_grid
        arg[index] = -10**10
        Vprime = sp.amax(arg, 1)
        psi_ind = sp.argmax(arg, 1)
        psi = W[psi_ind]
        delta = sp.linalg.norm(Vprime - V)

    #============================================================
    #Plot 3D
    x = sp.arange(0, N)
    y = sp.arange(0, K)
    X, Y = sp.meshgrid(x, y)
    fig1 = plt.figure()
    ax1 = Axes3D(fig1)
    ax1.set_xlabel(r'$W$')
    ax1.set_ylabel(r'$\varepsilon$')
    ax1.set_zlabel(r'$V$')
    ax1.plot_surface(W[X], Y, sp.transpose(Vprime), cmap=cm.coolwarm)
    plt.savefig('stoch_value.pdf')
Example #21
 def correlation_1s_1s(self, op1, op2, n1, d, return_exvals=False):
     """Computes a correlation function of two 1 site operators.
     
     The result is < op1_k op2_k+j > - <op1_k> * <op2_k+j> 
     with the operators acting on sites k and k + j, with j running from
     0 to d.
     
     Optionally returns the corresponding expectation values <op1_k+j> and 
     <op2_k+j>.
     
     Parameters
     ----------
     op1 : ndarray or callable
         The first operator, acting on the first site.
     op2 : ndarray or callable
         The second operator, acting on the second site.
     n1  : int
         Site to begin from.
     d : int
         The distance (number of sites) between the two sites acted on non-trivially.
     return_exvals : bool
         Whether to return expectation values for op1 and op2 for all sites.
         
     Returns
     -------
     ccf : sequence of complex128
         The correlation function across d + 1 sites (including site k).
     ex1 : sequence of complex128
         Expectation values of op1 for each site. Only if return_exvals == True.
     ex2 : sequence of complex128
         See ex1.
     """
     ex1 = sp.zeros((d + 1), dtype=sp.complex128)
     for j in range(d + 1):
         ex1[j] = self.expect_1s(op1, n1 + j)
         
     if op1 is op2:
         ex2 = ex1
     else:
         ex2 = sp.zeros((d + 1), dtype=sp.complex128)
         for j in range(d + 1):
             ex2[j] = self.expect_1s(op2, n1 + j)
         
     cf = self.expect_1s_1s(op1, op2, n1, n1 + d, return_intermediates=True)
         
     ccf = sp.zeros((d + 1), dtype=sp.complex128)
     for j in range(d + 1):
         ccf[j] = cf[j] - ex1[0] * ex2[j]
         
     if return_exvals:
         ex1_ = sp.tile(ex1, [(d + 1) // d + 1])[:d + 1]
          ex2_ = sp.tile(ex2, [(d + 1) // d + 1])[:d + 1]
         return ccf, ex1_, ex2_
     else:
         return ccf
Example #22
 def correlation_1s_1s(self, op1, op2, n1, d, return_exvals=False):
     """Computes a correlation function of two 1 site operators.
     
     The result is < op1_k op2_k+j > - <op1_k> * <op2_k+j> 
     with the operators acting on sites k and k + j, with j running from
     0 to d.
     
     Optionally returns the corresponding expectation values <op1_k+j> and 
     <op2_k+j>.
     
     Parameters
     ----------
     op1 : ndarray or callable
         The first operator, acting on the first site.
     op2 : ndarray or callable
         The second operator, acting on the second site.
     n1  : int
         Site to begin from.
     d : int
         The distance (number of sites) between the two sites acted on non-trivially.
     return_exvals : bool
         Whether to return expectation values for op1 and op2 for all sites.
         
     Returns
     -------
     ccf : sequence of complex128
         The correlation function across d + 1 sites (including site k).
     ex1 : sequence of complex128
         Expectation values of op1 for each site. Only if return_exvals == True.
     ex2 : sequence of complex128
         See ex1.
     """
     ex1 = sp.zeros((d + 1), dtype=sp.complex128)
      for j in range(d + 1):
         ex1[j] = self.expect_1s(op1, n1 + j)
         
     if op1 is op2:
         ex2 = ex1
     else:
         ex2 = sp.zeros((d + 1), dtype=sp.complex128)
          for j in range(d + 1):
             ex2[j] = self.expect_1s(op2, n1 + j)
         
     cf = self.expect_1s_1s(op1, op2, n1, n1 + d, return_intermediates=True)
         
     ccf = sp.zeros((d + 1), dtype=sp.complex128)
      for j in range(d + 1):
         ccf[j] = cf[j] - ex1[0] * ex2[j]
         
     if return_exvals:
          ex1_ = sp.tile(ex1, [(d + 1) // d + 1])[:d + 1]
          ex2_ = sp.tile(ex2, [(d + 1) // d + 1])[:d + 1]
         return ccf, ex1_, ex2_
     else:
         return ccf
Example #23
def plot_stoch_value():
    #Compute Solution==========================================================
    sigma = .5
    mu = 4*sigma
    K = 7
    Gamma, eps = discretenorm.discretenorm(K,mu,sigma)
    
    N = 100
    W = sp.linspace(0,1,N)
    V = sp.zeros((N,K))
    
    u = lambda c: sp.sqrt(c)
    beta = 0.99
    
    X,Y= sp.meshgrid(W,W)
    Wdiff = Y-X
    index = Wdiff < 0
    Wdiff[index] = 0
    
    util_grid = u(Wdiff)
    
    util3 = sp.tile(util_grid[:,:,sp.newaxis],(1,1,K))
    eps_grid = eps[sp.newaxis,sp.newaxis,:]
    eps_util = eps_grid*util3
    
    Gamma_grid = Gamma[sp.newaxis,:]
    
    delta = 1
    Vprime = V
    z = 0
    while (delta > 10**-9):
        z= z+1
        V = Vprime
        gamV = Gamma_grid*V
        Expval = sp.sum(gamV,1)
        Exp_grid = sp.tile(Expval[sp.newaxis,:,sp.newaxis],(N,1,K))
        arg = eps_util+beta*Exp_grid
        arg[index] = -10**10
        Vprime = sp.amax(arg,1)
        psi_ind = sp.argmax(arg,1)
        psi = W[psi_ind]
        delta = sp.linalg.norm(Vprime - V)
    
    #============================================================    
    #Plot 3D    
    x=sp.arange(0,N)
    y=sp.arange(0,K)
    X,Y=sp.meshgrid(x,y)
    fig1 = plt.figure()
    ax1= Axes3D(fig1)
    ax1.set_xlabel(r'$W$')
    ax1.set_ylabel(r'$\varepsilon$')
    ax1.set_zlabel(r'$V$')
    ax1.plot_surface(W[X],Y,sp.transpose(Vprime), cmap=cm.coolwarm)
    plt.savefig('stoch_value.pdf')
Example #24
def krondiag(v1,v2):
    """calculate diagonal of kronecker(diag(v1),diag(v2))
    note that this returns a non-flattened matrix
    """
    M1 = SP.tile(v1[:,SP.newaxis],[1,v2.shape[0]])
    M2 = SP.tile(v2[SP.newaxis,:],[v1.shape[0],1])
    M1 *= M2
    #RV  = (M1).ravel()
    #naive:
    #r=SP.kron(SP.diag(v1), SP.diag(v2)).diagonal()
    return M1
Example #25
File: runtest.py Project: scivision/SimISR
def makedata(testpath, tint):
    """ This will make the input data for the test case. The data will have cases
        where there will be enhancements in Ne, Ti and Te in one location. Each 
        case will have 3 integration periods. The first 3 integration periods will
        be the default set of parameters Ne=Ni=1e11 and Te=Ti=2000.
        Inputs
            testpath - Directory that will hold the data.
            tint - The integration time in seconds.
    """
    testpath = Path(testpath).expanduser()
    finalpath = testpath.joinpath('Origparams')
    if not finalpath.is_dir():
        finalpath.mkdir()
    data = sp.array([[1e11, 1100.], [1e11, 2100.]])
    z = (50. + sp.arange(50) * 10.)
    nz = len(z)
    params = sp.tile(data[sp.newaxis, sp.newaxis], (nz, 1, 1, 1))
    epnt = range(20, 22)
    p2 = sp.tile(params, (1, 4, 1, 1))
    #enhancement in Ne
    p2[epnt, 1, :, 0] = 5e11
    #enhancement in Ti
    p2[epnt, 2, 0, 1] = 2200.
    #enhancement in Te
    p2[epnt, 3, 1, 1] = 4200.
    coords = sp.column_stack((sp.ones(nz), sp.ones(nz), z))
    species = ['O+', 'e-']
    times = sp.array([[0, 1e3]])
    times2 = sp.column_stack((sp.arange(0, 4), sp.arange(1, 5))) * 3 * tint
    vel = sp.zeros((nz, 1, 3))
    vel2 = sp.zeros((nz, 4, 3))
    Icontstart = IonoContainer(coordlist=coords,
                               paramlist=params,
                               times=times,
                               sensor_loc=sp.zeros(3),
                               ver=0,
                               coordvecs=['x', 'y', 'z'],
                               paramnames=None,
                               species=species,
                               velocity=vel)
    Icont1 = IonoContainer(coordlist=coords,
                           paramlist=p2,
                           times=times2,
                           sensor_loc=sp.zeros(3),
                           ver=0,
                           coordvecs=['x', 'y', 'z'],
                           paramnames=None,
                           species=species,
                           velocity=vel2)

    finalfile = finalpath.joinpath('0 stats.h5')
    Icont1.saveh5(str(finalfile))
    Icontstart.saveh5(str(testpath.joinpath('startfile.h5')))
Example #26
def krondiag(v1, v2):
    """calcualte diagonal of kronecker(diag(v1),diag(v2)))
    note that this returns a non-flattened matrix
    """
    M1 = SP.tile(v1[:, SP.newaxis], [1, v2.shape[0]])
    M2 = SP.tile(v2[SP.newaxis, :], [v1.shape[0], 1])
    M1 *= M2
    #RV  = (M1).ravel()
    #naive:
    #r=SP.kron(SP.diag(v1), SP.diag(v2)).diagonal()
    #pdb.set_trace()
    return M1
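
A small worked example (assumes scipy imported as SP, as above):

import scipy as SP

v1 = SP.array([1., 2.])
v2 = SP.array([10., 20., 30.])
M = krondiag(v1, v2)
print(M)    # [[10. 20. 30.] [20. 40. 60.]]
print(SP.allclose(M.ravel(),
                  SP.kron(SP.diag(v1), SP.diag(v2)).diagonal()))    # True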
Example #27
def add_landmark_to_map(mu, sigma, z, mapout, Q, scale):
    """Add a landmark to the UKF.
       We have to compute the uncertainty of the landmark given the current state
       (and its uncertainty) of the newly observed landmark. To this end, we also
       employ the unscented transform to propagate Q (sensor noise) through the
       current state"""
    
    # For computing sigma
    # FIND OUT WHY THEY WERE USING A GLOBAL ---> global scale;

    #add landmark to the map
    mapout += [z.idx]
    # TODO: Initialize its pose according to the measurement and add it to mu
    
    # Append the measurement to the state vector
    mu = scipy.hstack((mu, [z['range'], z['bearing']]))
    
    # Initialize its uncertainty and add it to sigma
    sigma = scipy.linalg.block_diag(sigma, Q)
    
    # Transform from [range, bearing] to the x/y location of the landmark
    # This operation initializes the uncertainty in the position of the landmark
    # Sample sigma points
    sig_pnts_new = compute_sigma_points(mu, sigma, scale)
    # Normalize!
    sig_pnts_new[2,:] = normalize_angle(sig_pnts_new[2,:])
    # Compute the xy location of the new landmark according to each sigma point
    newX = sig_pnts_new[0,:] + sig_pnts_new[-2,:]*scipy.cos(sig_pnts_new[2,:] + sig_pnts_new[-1,:])
    newY = sig_pnts_new[1,:] + sig_pnts_new[-2,:]*scipy.sin(sig_pnts_new[2,:] + sig_pnts_new[-1,:])
    # The last 2 components of the sigma points can now be replaced by the xy pose of the landmark
    sig_pnts_new[-2,:] = newX
    sig_pnts_new[-1,:] = newY
    
    # Recover mu and sigma
    n = len(mu)
    #lam = scale - n
    w0 = 1 - n / scale  #lam/scale
    wm = scipy.hstack(([w0], scipy.tile(1. / (2 * scale), 2 * n)))
    
    # Theta should be recovered by summing up the sines and cosines
    cosines = scipy.sum(scipy.cos(sig_pnts_new[2,:])*wm)
    sines = scipy.sum(scipy.sin(sig_pnts_new[2,:])*wm)
    
    # recompute the angle and normalize it
    mu_theta = scipy.arctan2(sines, cosines)
    mu = scipy.sum(sig_pnts_new*scipy.tile(wm, (sig_pnts_new.shape[0], 1)), 1)
    mu[2] = mu_theta

    diff = sig_pnts_new - scipy.tile(mu[:, scipy.newaxis], (1, sig_pnts_new.shape[1]))

    # Normalize!
    diff[2,:] = normalize_angle(diff[2,:])
    sigma = scipy.dot(scipy.tile(wm, (diff.shape[0], 1))*diff, diff.T)

    # Return the updated state estimate, covariance, and map
    return mu, sigma, mapout
Example #28
 def _gradQuadrForm(self, hyperparams,dK,columns =True ):
     """derivative of the quadtratic form w.r.t. kernel derivative matrix (dK)"""
     KV = self.get_covariances(hyperparams)
     Si = KV['Si']
     Ytilde = (KV['YSi'])
     if columns:
         UdKU = SP.dot(KV['Uc'].T,SP.dot(dK,KV['Uc']))
         SYUdKU = SP.dot((Ytilde*SP.tile(KV['Sr'][:,SP.newaxis],(1,Ytilde.shape[1]))),UdKU.T)
     else:
         UdKU = SP.dot(KV['Ur'].T,SP.dot(dK,KV['Ur']))
         SYUdKU = SP.dot(UdKU,(Ytilde*SP.tile(KV['Sc'][SP.newaxis,:],(Ytilde.shape[0],1))))
     return -SP.dot(Ytilde.ravel(),SYUdKU.ravel())
Example #29
def array_coords(shape):
	"""
	Faster version of scipy.indices()
	"""
	y = shape[0]
	x = shape[1]
	out = scipy.empty((2,y,x))
	t = scipy.arange(y,dtype='f8')
	out[0] = scipy.tile(t,(x,1)).T
	t = scipy.arange(x,dtype='f8')
	out[1] = scipy.tile(t,(y,1))
	return out
Example #30
    def __init__(self, gridObj, dt, nParticles=1.e11, tBunchSpacing=25.e-9):

        self.gridObj = gridObj
        self.nx = gridObj.getNxExt()
        self.ny = gridObj.getNyExt()
        self.np = gridObj.getNpExt()
        self.lx = gridObj.getLxExt()
        self.ly = gridObj.getLyExt()
        self.dx = gridObj.getDx()
        self.dy = gridObj.getDy()
        self.dt = dt

        self.nParticles = nParticles
        self.charge = spc.elementary_charge
        self.beamVelocity = spc.c
        self.circumference = 6900
        self.radiusSigma = 0.002
        self.radiusLimitSigma = 5
        self.xBeamCenter = 0.
        self.yBeamCenter = 0.
        self.tBunchSpacing = tBunchSpacing
        self.bunchLengthSigma = 0.1
        self.tBunchLengthSigma = self.bunchLengthSigma / self.beamVelocity
        self.bunchLengthLimitSigma = 5

        self.qTransversalProfile = sp.zeros(self.np)

        xMesh = self.gridObj.getXMesh()
        yMesh = self.gridObj.getYMesh()
        xCoords = sp.tile(xMesh, self.ny)
        yCoords = sp.reshape(sp.tile(yMesh, (self.nx, 1)).transpose(), self.np)
        beamPoints = (
            (self.radiusLimitSigma * self.radiusSigma +
             max([self.dx, self.dy]))**2 - xCoords**2 - yCoords**2) > 0
        self.qTransversalProfile[beamPoints] = ((sps.norm.cdf(
            sp.clip(
                (xCoords - self.xBeamCenter + self.dx / 2.) / self.radiusSigma,
                -self.radiusLimitSigma, self.radiusLimitSigma)) - sps.norm.cdf(
                    sp.clip(
                        (xCoords - self.xBeamCenter - self.dx / 2.) /
                        self.radiusSigma, -self.radiusLimitSigma,
                        self.radiusLimitSigma))) * (sps.norm.cdf(
                            sp.clip(
                                (yCoords - self.yBeamCenter + self.dy / 2.) /
                                self.radiusSigma, -self.radiusLimitSigma,
                                self.radiusLimitSigma)) - sps.norm.cdf(
                                    sp.clip(
                                        (yCoords - self.yBeamCenter -
                                         self.dy / 2.) / self.radiusSigma,
                                        -self.radiusLimitSigma,
                                        self.radiusLimitSigma))))[beamPoints]
        self.qTransversalProfile /= sp.sum(self.qTransversalProfile)
Example #31
 def _gradQuadrFormX(self, hyperparams, dKx, columns=True):
     """derivative of the quadtratic form with.r.t. covarianceparameters for row or column covariance"""
     KV = self.get_covariances(hyperparams)
     Ytilde = (KV['YSi'])
     if columns:
         UY = SP.dot(KV['Uc'], Ytilde.T)
         UYS = UY * SP.tile(KV['Sr'][SP.newaxis, :], (Ytilde.shape[1], 1))
     else:
         UY = SP.dot(KV['Ur'], Ytilde)
         UYS = UY * SP.tile(KV['Sc'][SP.newaxis, :], (Ytilde.shape[0], 1))
     UYSYU = SP.dot(UYS, UY.T)
     trUYSYUdK = (UYSYU * dKx.T).sum(0)
     return -2.0 * trUYSYUdK
Example #32
 def _gradQuadrFormX(self, hyperparams,dKx,columns =True ):
     """derivative of the quadtratic form with.r.t. covarianceparameters for row or column covariance"""
     KV = self.get_covariances(hyperparams)
     Ytilde = (KV['YSi'])
     if columns:
         UY=SP.dot(KV['Uc'],Ytilde.T)
         UYS = UY*SP.tile(KV['Sr'][SP.newaxis,:],(Ytilde.shape[1],1))
     else:
         UY=SP.dot(KV['Ur'],Ytilde)
         UYS = UY*SP.tile(KV['Sc'][SP.newaxis,:],(Ytilde.shape[0],1))
     UYSYU=SP.dot(UYS,UY.T)
     trUYSYUdK=(UYSYU*dKx.T).sum(0)
     return -2.0*trUYSYUdK
Example #33
 def _gradQuadrForm(self, hyperparams, dK, columns=True):
     """derivative of the quadtratic form w.r.t. kernel derivative matrix (dK)"""
     KV = self.get_covariances(hyperparams)
     Si = KV['Si']
     Ytilde = (KV['YSi'])
     if columns:
         UdKU = SP.dot(KV['Uc'].T, SP.dot(dK, KV['Uc']))
         SYUdKU = SP.dot((Ytilde * SP.tile(KV['Sr'][:, SP.newaxis],
                                           (1, Ytilde.shape[1]))), UdKU.T)
     else:
         UdKU = SP.dot(KV['Ur'].T, SP.dot(dK, KV['Ur']))
         SYUdKU = SP.dot(UdKU, (Ytilde * SP.tile(KV['Sc'][SP.newaxis, :],
                                                 (Ytilde.shape[0], 1))))
     return -SP.dot(Ytilde.ravel(), SYUdKU.ravel())
Example #34
 def __generate_potential_grid(self):
     from scipy import linspace, tile
     rowdim = self.canvas.shape[0]
     coldim = self.canvas.shape[1]
     pot_slice = linspace(p.potential_drop[0],p.potential_drop[1],rowdim)
     potential = tile(pot_slice,(coldim,1)).T
     self.potential_grid = potential
Example #35
 def addFitPlot(self, fit):
     """add a contour plot on top using fitted data and add additional plots to sidebars (TODO) """
     logger.debug("adding fit plot with fit %s " % fit)
     if not fit.fitted:
         logger.error(
             "cannot add a fitted plot for unfitted data. Run fit first")
         return
     if not self.drawFitBool:
         logger.info("first fit plot so initialising contour plot")
         self.initialiseFitPlot()
     logger.info("attempting to set fit data")
     self.contourPositions = [
         scipy.tile(self.contourXS, len(self.contourYS)),
         scipy.repeat(self.contourYS, len(self.contourXS))
     ]  #for creating data necessary for gauss2D function
     zsravelled = fit.fitFunc(self.contourPositions,
                              *fit._getCalculatedValues())
     #        logger.debug("zs ravelled shape %s " % zsravelled.shape)
     self.contourZS = zsravelled.reshape(
         (len(self.contourYS), len(self.contourXS)))
     #        logger.debug("zs contour shape %s " % self.contourZS.shape)
     #        logger.info("shape contour = %s " % self.contourZS)
     self._fit_value.data = self.contourZS
     self.container.invalidate_draw()
     self.container.request_redraw()
     self.drawFitBool = True
Example #36
def initial_cond(coords, mass, dipole, temp, F):
    cm_coords = coords - tile(center_of_mass(coords, mass), (coords.shape[0], 1))

    print "computing inertia tensor and principal axes of inertia"

    mol_I, mol_Ix = eig(inertia_tensor(cm_coords, mass))
    mol_I.sort()

    print "principal moments of inertia are: ", mol_I

    # compute the ratio of the dipole energy to the
    # rotational energy

    print "x = (mu*F / kB*T_R) = ", norm(dipole) * F / kB_au / temp

    # random initial angular velocity vector
    # magnitude set so that 0.5 * I * w**2.0 = kT
    w_mag = sqrt(2.0 * kB_au * temp / mol_I.mean())
    w0 = 2.0 * rand(3) - 1.0
    w0 = w0 / norm(w0) * w_mag

    # random initial orientation / random unit quaternion
    q0 = 2.0 * rand(4) - 1.0
    q0 = q0 / norm(q0)

    return q0, w0
Example #37
def Problem1Real():
    beta = 0.9
    T = 10
    N = 100
    u = lambda c: sp.sqrt(c)
    W = sp.linspace(0, 1, N)
    X, Y = sp.meshgrid(W, W)
    Wdiff = Y - X
    index = Wdiff < 0
    Wdiff[index] = 0
    util_grid = u(Wdiff)
    util_grid[index] = -10**10
    V = sp.zeros((N, T + 2))
    psi = sp.zeros((N, T + 1))

    for k in range(T, -1, -1):
        val = util_grid + beta * sp.tile(sp.transpose(V[:, k + 1]), (N, 1))
        vt = sp.amax(val, axis=1)
        psi_ind = sp.argmax(val, axis=1)
        V[:, k] = vt
        psi[:, k] = W[psi_ind]

    
    return V,psi
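
A hedged usage note: the function above runs backward induction over a 100-point grid and needs only scipy as sp in scope.

import scipy as sp

V, psi = Problem1Real()
print(V.shape, psi.shape)    # (100, 12) values, (100, 11) policies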
Example #38
File: trainer.py Project: ageek/sandbox
def make_batches(data, labels=None, batch_size=100):
    if labels is not None:
        num_labels = labels.shape[1]
        cls_data = [data[find(labels[:,i] == 1)] for i in range(num_labels)]
        cls_sizes = [d.shape[0] for d in cls_data]
        cls_sels = [permutation(range(s)) for s in cls_sizes]
        n = min(cls_sizes) * len(cls_sizes)
        batch_size = min(n, batch_size)
        lpb = batch_size // num_labels
        new_dat = []
        for i in range(n // batch_size):
            for sel, cd in zip(cls_sels, cls_data):
                new_dat.append(cd[sel[i*lpb:(i+1)*lpb]])
        if sparse.issparse(data):
            data = sparse.vstack(new_dat).tocsr()
        else:
            data = np.vstack(new_dat)
        labels = np.tile(np.repeat(np.eye(num_labels), lpb, 0), (n // batch_size, 1))
        n = len(labels)
        perm = range(n)
    else:
        n = data.shape[0]
        perm = permutation(range(n))
    i = 0
    while i < n:
        batch = perm[i:i+batch_size]
        i += batch_size
        yield (data[batch], None) if labels is None else (data[batch], labels[batch])
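
A hypothetical driver for the generator above. It assumes the pylab-style find/permutation helpers and scipy.sparse that make_batches relies on are already imported in its module:

import numpy as np

data = np.random.randn(12, 4)
labels = np.tile(np.eye(2), (6, 1))     # alternating one-hot labels
for batch_x, batch_y in make_batches(data, labels, batch_size=4):
    print(batch_x.shape, batch_y.shape)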
Example #39
def makedata(testpath):
    """ This will make the input data for the test case. The data will have the 
        default set of parameters Ne=Ni=1e11 and Te=Ti=2000.
        Inputs
            testpath - Directory that will hold the data.
            
    """
    finalpath = testpath.joinpath('Origparams')
    if not finalpath.exists():
        finalpath.mkdir()
    data=SIMVALUES
    z = sp.linspace(50.,1e3,50)
    nz = len(z)
    params = sp.tile(data[sp.newaxis,sp.newaxis,:,:],(nz,1,1,1))
    coords = sp.column_stack((sp.ones(nz),sp.ones(nz),z))
    species=['O+','e-']
    times = sp.array([[0,1e3]])
    vel = sp.zeros((nz,1,3))
    Icont1 = IonoContainer(coordlist=coords,paramlist=params,times = times,sensor_loc = sp.zeros(3),ver =0,coordvecs =
        ['x','y','z'],paramnames=None,species=species,velocity=vel)
        
    finalfile = finalpath.joinpath('0 stats.h5')
    Icont1.saveh5(str(finalfile))
    # set start temp to 1000 K.
    Icont1.Param_List[:,:,:,1]=1e3
    Icont1.saveh5(str(testpath.joinpath('startfile.h5')))
Example #40
    def fit(self, X, y):
        self.Xtr = X
        self.ytr = y

        if self.K is None:
            self.K = np.dot(X, X.T)

        [n_s, n_f] = X.shape
        if y.ndim == 1:
            y = scipy.reshape(y, (n_s, 1))

        S, U, ldelta0 = self.train_nullmodel(y,
                                             self.K,
                                             numintervals=self.numintervals,
                                             ldeltamin=self.ldeltamin,
                                             ldeltamax=self.ldeltamax,
                                             scale=self.scale,
                                             KSquare=self.KSquare,
                                             KSearch=self.KSearch)

        self.delta0 = scipy.exp(ldelta0)
        Sdi = 1. / (S + self.delta0)
        Sdi_sqrt = scipy.sqrt(Sdi)
        SUX = scipy.dot(U.T, X)
        SUX = SUX * scipy.tile(Sdi_sqrt, (n_f, 1)).T
        SUy = scipy.dot(U.T, y)
        SUy = SUy * scipy.reshape(Sdi_sqrt, (n_s, 1))

        self.clf.fit(SUX, SUy)
Example #41
File: gplvm_ard.py Project: wqren/pygp
 def get_covariances(self, hyperparams):
     if not self._is_cached(
             hyperparams) or self._active_set_indices_changed:
         #update covariance structure
         K = self.covar.K(hyperparams['covar'], self.x)
         #calc eigenvalue decomposition
         [S, U] = SP.linalg.eigh(K)
         #noise diagonal
         #depending on noise model this may be either a vector or a matrix
         Knoise = self.likelihood.Kdiag(hyperparams['lik'], self.x)
         #noise version of S
         Sn = Knoise + SP.tile(S[:, SP.newaxis], [1, self.d])
         #inverse
         Si = 1. / Sn
         #rotate data
         y_rot = SP.dot(U.T, self.y)
         #also store version of data rotated and Si applied
         y_roti = (y_rot * Si)
         self._covar_cache = {
             'S': S,
             'U': U,
             'K': K,
             'Knoise': Knoise,
             'Sn': Sn,
             'Si': Si,
             'y_rot': y_rot,
             'y_roti': y_roti
         }
         self._covar_cache['hyperparams'] = copy.deepcopy(hyperparams)
         pass
     #return update covar cache
     return self._covar_cache
Example #42
    def compute_h_and_H(self, state):
        numVal = self.beaconLocations.shape[0]
        tiledState = sp.tile(sp.reshape(state, [1, 4]), [numVal, 1])

        #Compute the distances and the range model
        distances = self.beaconLocations - tiledState[:, :2]
        rangeModel = sp.linalg.norm(distances, axis=1, keepdims=True)

        #Compute the range rate model
        numerator = (-distances[:, 0] * tiledState[:, 2]
                     - distances[:, 1] * tiledState[:, 3])
        rangeRateModel = sp.reshape(numerator, rangeModel.shape) / rangeModel

        #Now to get the derivatives
        dRangeModel = sp.hstack([-distances, sp.zeros_like(distances)])
        dRangeModel = dRangeModel / rangeModel

        dRangeRateModel = (sp.hstack([tiledState[:, 2:], -distances]) -
                           rangeRateModel * dRangeModel)
        dRangeRateModel = dRangeRateModel / rangeModel

        return sp.vstack([rangeModel, rangeRateModel
                          ]), sp.vstack([dRangeModel, dRangeRateModel])
Example #43
    def set_normal_free_energy(self):
        """
		Set free energy as a function of odorant; normal tuning curve.
		"""

        self.eps_base = self.mu_eps + self.normal_eps_tuning_prefactor* \
            sp.exp(-(1.*sp.arange(self.Mm))**2.0/(2.0* \
            self.normal_eps_tuning_width)**2.0)

        self.eps_base += random_matrix(self.Mm,
                                       params=[0, self.sigma_eps],
                                       seed=self.seed_eps)

        # If dual signal, use the average of the FULL signal nonzero components
        if self.Kk_split == 0:
            self.eps = self.WL_scaling * sp.log(self.mu_Ss0) + self.eps_base
        else:
            self.eps = self.WL_scaling*sp.log(sp.average(self.Ss\
                [self.Ss != 0])) + self.eps_base

        # Apply max and min epsilon value to each component
        self.min_eps = random_matrix(
            self.Mm,
            params=[self.mu_min_eps, self.sigma_min_eps],
            seed=self.seed_eps)
        self.max_eps = random_matrix(
            self.Mm,
            params=[self.mu_max_eps, self.sigma_max_eps],
            seed=self.seed_eps)
        self.eps = sp.maximum(self.eps, self.min_eps)
        self.eps = sp.minimum(self.eps, self.max_eps)

        # If an array of signals, replicate for each signal.
        if len(self.Ss.shape) > 1:
            self.eps = sp.tile(self.eps, [self.Ss.shape[1], 1]).T
Example #44
def center_on_cos(raw_quadratures, phi0=None, omega=None, snap_omega=False):
    mean = scipy.average(raw_quadratures, axis=1)
    no_angles, no_pulses = raw_quadratures.shape
    model = Model(cos_model)
    offset, amplitude, phi0, omega = guess_initial_parameters(mean, phi0, omega)
    model.set_param_hint("offset", value=offset)
    model.set_param_hint("amplitude", min=0., value=amplitude)
    model.set_param_hint("phi0", value=phi0)
    model.set_param_hint("omega", min=0., value=omega)
    model.make_params(verbose=False)
    steps = scipy.arange(no_angles)
    res = model.fit(mean, x=steps, verbose=False)
    omega_param = res.params["omega"]
    if snap_omega:
        appx_omega = float(omega_param)
        no_pi_intervals = int(round(pi/appx_omega))
        omega = pi/no_pi_intervals
        omega_param.set(omega, vary=False)
        res.fit(mean, x=steps, verbose=False)
    d_value, p_value_ks = kstest(res.residual, 'norm')
    mean_fit = res.eval(x=steps)
    offset = mean-mean_fit
    aligned_quadratures = raw_quadratures - scipy.tile(offset, (no_pulses, 1)).T
    centered_quadratures = aligned_quadratures - float(res.params["offset"])
    return (centered_quadratures,
            float(omega_param), float(res.params["phi0"]), p_value_ks)
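center_on_cos depends on cos_model and guess_initial_parameters defined elsewhere in its source file; a minimal, hypothetical pair consistent with the parameter names above might look like this (the real implementations may differ):

# --- hypothetical helpers consistent with the fit above ---
import numpy as np

def cos_model(x, offset, amplitude, phi0, omega):
    # mean quadrature as a function of the phase-step index
    return offset + amplitude * np.cos(omega * x + phi0)

def guess_initial_parameters(mean, phi0, omega):
    # crude starting values for the lmfit model
    offset = mean.mean()
    amplitude = 0.5 * (mean.max() - mean.min())
    if phi0 is None:
        phi0 = 0.0
    if omega is None:
        # dominant nonzero frequency of the mean trace
        k = np.argmax(np.abs(np.fft.rfft(mean - offset))[1:]) + 1
        omega = 2.0 * np.pi * k / len(mean)
    return offset, amplitude, phi0, omega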
Example #45
0
def makedata(testpath):
    """
        This will make the input data for the test case. The data will have the
        default set of parameters Ne=Ni=1e11 and Te=Ti=2000 K.
        Inputs
            testpath - Directory that will hold the data.

    """
    finalpath = testpath.joinpath('Origparams')
    if not finalpath.exists():
        finalpath.mkdir()
    data = SIMVALUES
    z = sp.linspace(50., 1e3, 50)
    nz = len(z)
    params = sp.tile(data[sp.newaxis, sp.newaxis, :, :], (nz, 1, 1, 1))
    coords = sp.column_stack((sp.ones(nz), sp.ones(nz), z))
    species = ['O+', 'e-']
    times = sp.array([[0, 1e9]])
    vel = sp.zeros((nz, 1, 3))
    Icont1 = IonoContainer(coordlist=coords,
                           paramlist=params,
                           times=times,
                           sensor_loc=sp.zeros(3),
                           ver=0,
                           coordvecs=['x', 'y', 'z'],
                           paramnames=None,
                           species=species,
                           velocity=vel)

    finalfile = finalpath.joinpath('0 stats.h5')
    Icont1.saveh5(str(finalfile))
    # set start temp to 1000 K.
    Icont1.Param_List[:, :, :, 1] = 1e3
    Icont1.saveh5(str(testpath.joinpath('startfile.h5')))
Example #46
0
    def _sig_surface(self, siglevel):
        '''
        Significance surface for plotting.
        '''
        sig = wave_signif(self, siglevel, lag1(self.series))
        sig = sp.tile(sig, (len(self.series), 1)).T
        return sig
Example #47
0
    def __generate_potential_grid(self):
        from scipy import linspace, tile
        rowdim = self.canvas.shape[0]
        coldim = self.canvas.shape[1]
        pot_slice = linspace(p.potential_drop[0], p.potential_drop[1], rowdim)
        potential = tile(pot_slice, (coldim, 1)).T
        self.potential_grid = potential
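A standalone check of the linspace-plus-tile construction, with a hypothetical 0 V to 1 V drop over a 4x3 canvas (numpy supplies the same functions):

# --- standalone check of the grid construction ---
import numpy as np

rowdim, coldim = 4, 3
pot_slice = np.linspace(0.0, 1.0, rowdim)  # potential drop down the rows
potential = np.tile(pot_slice, (coldim, 1)).T
print(potential.shape)  # (4, 3): each column carries the same drop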
Example #48
0
def con2vert(A, b):
    """
    Convert sets of constraints to a list of vertices (of the feasible region).
    If the shape is open, con2vert returns False for the closed property.
    """
    # Python implementation of con2vert.m by Michael Kleder (July 2005),
    #  available: http://www.mathworks.com/matlabcentral/fileexchange/7894
    #  -con2vert-constraints-to-vertices
    # Author: Michael Kleder (original MATLAB)
    #         Andre Campher (Python implementation)
    c = linalg.lstsq(mat(A), mat(b))[0]
    btmp = mat(b)-mat(A)*c
    D = mat(A)/matlib.repmat(btmp, 1, A.shape[1])

    fmatv = qhull(D, "Ft") #vertices on facets

    G = zeros((fmatv.shape[0], D.shape[1]))
    for ix in range(0, fmatv.shape[0]):
        F = D[fmatv[ix, :], :].squeeze()
        G[ix, :] = linalg.lstsq(F, ones((F.shape[0], 1)))[0].transpose()

    V = G + matlib.repmat(c.transpose(), G.shape[0], 1)
    ux = uniqm(V)

    eps = 1e-13
    Av = dot(A, ux.T)
    bv = tile(b, (1, ux.shape[0]))
    closed = sciall(Av - bv <= eps)

    return ux, closed
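As a cross-check, stock SciPy solves the same vertex-enumeration problem via scipy.spatial.HalfspaceIntersection when a strictly interior point is known; here the unit square 0 <= x, y <= 1:

# --- cross-check with scipy.spatial on the unit square ---
import numpy as np
from scipy.spatial import HalfspaceIntersection

A = np.array([[1., 0.], [-1., 0.], [0., 1.], [0., -1.]])
b = np.array([[1.], [0.], [1.], [0.]])
halfspaces = np.hstack([A, -b])  # scipy expects rows [A | -b] for A x <= b
hs = HalfspaceIntersection(halfspaces, np.array([0.5, 0.5]))
print(hs.intersections)  # the four corners of the unit square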
Example #49
0
    def _perform_fit(self):
        """Perform the fit using scipy optimise curve fit.
        We must supply x and y as one argument and zs as another, in the form
        xs: 0 1 2 0 1 2 0
        ys: 0 0 0 1 1 1 2
        zs: 1 5 6 1 9 8 2
        Hence the use of repeat and tile in positions and unravel for zs.
        Initially xs, ys are linspace arrays and zs is a 2D image array.
        """
        if self.xs is None or self.ys is None or self.zs is None:
            logger.warning(
                "attempted to fit data but had no data inside the Fit object. set xs,ys,zs first"
            )
            return ([], [])
        params = self._getParameters()
        if self.fitSubSpace:  # fit only the sub space
            # create xs, ys and zs which are appropriate slices of the arrays
            xs, ys, zs = self._get_subSpaceArrays()
        else:  # fit the whole array of data (slower)
            xs, ys, zs = self.xs, self.ys, self.zs
        # build the flattened coordinate pairs needed by the gauss2D function
        positions = scipy.array([
            scipy.tile(xs, len(ys)),
            scipy.repeat(ys, len(xs))
        ])
        if self.fitTimeLimitBool:
            modelFitResult = self.lmfitModel.fit(scipy.ravel(zs),
                                                 positions=positions,
                                                 params=params,
                                                 iter_cb=self.getFitCallback(
                                                     time.time()))
        else:  # no iter callback
            modelFitResult = self.lmfitModel.fit(scipy.ravel(zs),
                                                 positions=positions,
                                                 params=params)
        return modelFitResult
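The tile/repeat pairing above is what flattens a regular grid into the coordinate pairs sketched in the docstring; in isolation (plain numpy):

# --- how tile/repeat build the flattened coordinate grid ---
import numpy as np

xs = np.array([0, 1, 2])
ys = np.array([0, 1])
positions = np.array([np.tile(xs, len(ys)),     # x: 0 1 2 0 1 2
                      np.repeat(ys, len(xs))])  # y: 0 0 0 1 1 1
print(positions.shape)  # (2, 6): matches a raveled (2, 3) image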
Example #50
0
def _non_dominated_front_arr(iterable, key=lambda x: x, allowequality=True):
    """Return a subset of items from iterable which are not dominated by any
    other item in iterable.

    Faster version, based on boolean matrix manipulations.
    """
    items = list(iterable)
    fits = list(map(key, items))  # materialize so array() works under Python 3
    l = len(items)
    x = array(fits)
    a = tile(x, (l, 1, 1))
    b = a.transpose((1, 0, 2))
    if allowequality:
        ndom = sum(a <= b, axis=2)
    else:
        ndom = sum(a < b, axis=2)
    ndom = array(ndom, dtype=bool)
    res = set()
    for ii in range(l):
        res.add(ii)
        for ij in list(res):
            if ii == ij:
                continue
            if not ndom[ij, ii]:
                res.remove(ii)
                break
            elif not ndom[ii, ij]:
                res.remove(ij)
    return set(map(lambda i: items[i], res))
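A small usage check, assuming the module-level array/tile/sum names come from a scipy or numpy star import as in the original source; under minimization, (2, 2) is strictly dominated by (1, 1) and drops out:

# --- usage check (smaller is better in every objective) ---
pts = [(1.0, 1.0), (2.0, 2.0), (0.0, 3.0)]
print(_non_dominated_front_arr(pts))  # {(1.0, 1.0), (0.0, 3.0)}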
Example #51
0
    def e_step(self):
        D,N = self.documents.shape
        phi_sum = sp.zeros(D, dtype=sp.double)

        # for d from 0 <= d < D:
        #     gam_sum = gam[d].sum()
        #     for n from 0 <= n < N:
        #         wn = documents[d,n]
        #         for k from 0 <= k < K:
        #             phi[d,n,k] = wn * beta[k,n] * np.exp(digamma(gam[d,k])-digamma(gam_sum))
        #         if phi[d,n].sum() > 0:
        #             phi[d,n] /= phi[d,n].sum()
        #     gamma[d] = alpha + phi[d].sum(0)

        gam_sums = self.gam.sum(1)
        for n in range(N):
            wns = self.documents[:,n]
            phi_sum[:] = 0.0
            for k in range(self.K):
                self.phi[:,n,k] = wns * self.beta[k,n] * sp.exp(digamma(self.gam[:,k])-digamma(gam_sums))
                phi_sum += self.phi[:,n,k]
            phi_sum_ary = sp.tile(phi_sum, (self.K, 1)).T
            self.phi[:, n] /= phi_sum_ary + 1e-7  # guard against division by zero

        self.gam = self.alpha + self.phi.sum(1)
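The tile-then-divide at the end of the loop normalizes phi over topics for each document; the same step in isolation (plain numpy):

# --- the per-document normalization step, in isolation ---
import numpy as np

phi_n = np.array([[0.2, 0.6], [0.1, 0.1], [0.0, 0.0]])  # (D=3, K=2)
phi_sum = phi_n.sum(axis=1)
phi_n /= np.tile(phi_sum, (2, 1)).T + 1e-7
print(phi_n.sum(axis=1))  # ~[1, 1, 0]: rows normalized, all-zero row left alone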
Example #52
0
File: MNEfit.py  Project: MarvinT/pyMNE
def MNEfit(stim,resp,order):
    # in order for dlogloss to work, we need to know -<g(yt(n),xt)>data
    # == calculate the constrained averages over the data set
    Nsamples = sp.size(stim,0)
    Ndim = sp.size(stim,1)
    psp = sp.mean(sp.mean(resp)) #spike probability (first constraint)
    avg = (1.0*stim.T*resp)/(Nsamples*1.0)
    avgs = sp.vstack((psp,avg))
    if(order > 1):
        avgsqrd = (stim.T*1.0)*(sp.array(sp.tile(resp,(1,Ndim)))*sp.array(stim))/(Nsamples*1.0)
        avgsqrd = sp.reshape(avgsqrd,(Ndim**2,1))
        avgs = sp.vstack((avgs,avgsqrd))
    
    #initialize params:
    pstart = sp.log(1/avgs[0,0] - 1)
    pstart = sp.hstack((pstart,(.001*(2*sp.random.rand(Ndim)-1))))
    if(order > 1):
        temp = .0005*(2*sp.random.rand(Ndim,Ndim)-1)
        pstart = sp.hstack((pstart,sp.reshape(temp+temp.T,(1,Ndim**2))[0]))
    
    #redefine functions with fixed vals:
    def logLoss(p):
        return LLF.log_loss(p, stim, resp, order)
    def dlogLoss(p):
        return LLF.d_log_loss(p, stim, avgs, order)
    # run the optimization (conjugate gradient)
    pfinal = opt.fmin_cg(logLoss, pstart, fprime=dlogLoss)
    return pfinal
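The constrained averages MNEfit matches are response-weighted moments of the stimulus; a self-contained look at the first-order piece, using np.matrix because the function's `*` products assume matrix types (the sizes here are illustrative):

# --- the first-order constrained average (spike-triggered average) ---
import numpy as np

rng = np.random.default_rng(0)
stim = np.matrix(rng.standard_normal((500, 3)))       # Nsamples x Ndim
resp = np.matrix((rng.random((500, 1)) < 0.2) * 1.0)  # binary spike vector
psp = np.mean(resp)                                   # spike probability
avg = (1.0 * stim.T * resp) / 500.0                   # Ndim x 1
print(psp, avg.T)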
Example #53
0
def analysisdump(maindir, configfile, suptitle=None):
    """ This function will perform all of the plotting functions in this module
        given the main directory where all of the files live.
        Inputs
            maindir - The directory for the simulation.
            configfile - The name of the configuration file used.
            suptitle - The supertitle used on the files.
    """
    maindir = Path(maindir)
    plotdir = maindir.joinpath("AnalysisPlots")
    if not plotdir.is_dir():
        plotdir.mkdir()

    # plot spectrums
    filetemplate1 = str(maindir.joinpath("AnalysisPlots", "Spec"))
    filetemplate3 = str(maindir.joinpath("AnalysisPlots", "ACF"))
    filetemplate4 = str(maindir.joinpath("AnalysisPlots", "AltvTime"))
    (sensdict, simparams) = readconfigfile(configfile)
    angles = simparams["angles"]
    ang_data = sp.array([[iout[0], iout[1]] for iout in angles])
    if sensdict["Name"].lower() not in ["risr", "pfisr"]:
        ang_data_temp = ang_data.copy()
        beamlistlist = sp.array(simparams["outangles"]).astype(int)
        ang_data = sp.array([ang_data_temp[i].mean(axis=0) for i in beamlistlist])

    zenang = ang_data[sp.argmax(ang_data[:, 1])]
    rnggates = simparams["Rangegatesfinal"]
    rngchoices = sp.linspace(sp.amin(rnggates), sp.amax(rnggates), 4)
    angtile = sp.tile(zenang, (len(rngchoices), 1))
    coords = sp.column_stack((sp.transpose(rngchoices), angtile))
    times = simparams["Timevec"]

    filetemplate2 = str(maindir.joinpath("AnalysisPlots", "Params"))
    if simparams["Pulsetype"].lower() == "barker":
        params = ["Ne"]
        if suptitle is None:
            plotbeamparametersv2(times, configfile, maindir, params=params, filetemplate=filetemplate2, werrors=True)
        else:
            plotbeamparametersv2(
                times, configfile, maindir, params=params, filetemplate=filetemplate2, suptitle=suptitle, werrors=True
            )
    else:
        params = ["Ne", "Nepow", "Te", "Ti", "Vi"]
        if suptitle is None:
            plotspecs(coords, times, configfile, maindir, cartcoordsys=False, filetemplate=filetemplate1)
            plotacfs(coords, times, configfile, maindir, cartcoordsys=False, filetemplate=filetemplate3)
            plotbeamparametersv2(times, configfile, maindir, params=params, filetemplate=filetemplate2, werrors=True)
            beamvstime(configfile, maindir, params=params, filetemplate=filetemplate4)
        else:
            plotspecs(
                coords, times, configfile, maindir, cartcoordsys=False, filetemplate=filetemplate1, suptitle=suptitle
            )
            plotacfs(
                coords, times, configfile, maindir, cartcoordsys=False, filetemplate=filetemplate3, suptitle=suptitle
            )
            plotbeamparametersv2(
                times, configfile, maindir, params=params, filetemplate=filetemplate2, suptitle=suptitle, werrors=True
            )
            beamvstime(configfile, maindir, params=params, filetemplate=filetemplate4, suptitle=suptitle)
Example #54
0
def my_bh_fdr(p_val_vec):
    index = scipy.argsort(p_val_vec)
    n = len(p_val_vec)
    # step-up values n/k * p_(k), capped at 1
    exp_err = scipy.vstack((float(n) / scipy.arange(1, n + 1) * p_val_vec[index],
                            scipy.tile(1, [1, n]))).min(axis=0)
    # enforce monotonicity against the preceding adjusted value
    exp_err = scipy.vstack((exp_err,
                            exp_err[scipy.r_[0, scipy.arange(n - 1)]])).max(axis=0)
    resort_index = scipy.argsort(index)
    return exp_err[resort_index]
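A worked call, assuming an older scipy that re-exports numpy's array functions as the original file does (substitute numpy otherwise); the second and third values are tied upward by the monotonicity step:

# --- worked example ---
import scipy

p = scipy.array([0.01, 0.04, 0.03, 0.5])
print(my_bh_fdr(p))  # -> [0.04, 0.06, 0.06, 0.5]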
Example #55
0
def Problem6Real():
    sigma = .5
    mu = 4*sigma
    rho = .5
    sigmaZ = sigma/sp.sqrt(1-rho**2)
    w = 0.5 + rho/4
    baseSigma = w*sigma +(1-w)*sigmaZ
    K = 7
    eps, Gamma = tauchenhussey.tauchenhussey(K,mu,rho,sigma, baseSigma)
    eps = sp.reshape(eps,K)
    
    N = 100
    W = sp.linspace(0,1,N)
    V = sp.zeros((N,K))
    
    u = lambda c: sp.sqrt(c)
    beta = 0.9
    
    X,Y= sp.meshgrid(W,W)
    Wdiff = Y-X
    index = Wdiff < 0
    Wdiff[index] = 0
    
    util_grid = u(Wdiff)
    
    
    util3 = sp.tile(util_grid[:,:,sp.newaxis],(1,1,K))
    eps_grid = eps[sp.newaxis,sp.newaxis,:]
    eps_util = eps_grid*util3
    
    delta = 1
    Vprime = V
    z=0
    while (delta>10**-9):
        z=z+1
        V = Vprime
        Expval = sp.dot(V,sp.transpose(Gamma))
        Exp_grid = sp.tile(Expval[sp.newaxis,:,:],(N,1,1))
        arg = eps_util+beta*Exp_grid
        arg[index] = -1e10  # effectively -inf for infeasible (W' > W) choices
        Vprime = sp.amax(arg,1)
        psi_ind = sp.argmax(arg,1)
        psi = W[psi_ind]
        delta = sp.linalg.norm(Vprime - V)
    
    return Vprime,psi
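The Bellman update above leans on tile to align the utility and continuation-value grids; a quick shape check of the two expansions (plain numpy, illustrative sizes):

# --- shape check for the tiled grids in the Bellman update ---
import numpy as np

N, K = 100, 7
util_grid = np.zeros((N, N))
V = np.zeros((N, K))
Gamma = np.eye(K)

util3 = np.tile(util_grid[:, :, np.newaxis], (1, 1, K))              # (N, N, K)
Exp_grid = np.tile(np.dot(V, Gamma.T)[np.newaxis, :, :], (N, 1, 1))  # (N, N, K)
print(util3.shape, Exp_grid.shape)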
Example #56
0
File: dist.py  Project: PMBio/GNetLMM
def sq_dist(X1, X2=None):
    """
    computes a matrix of all pairwise squared distances
    """
    if X2 is None:
        X2 = X1
    assert X1.shape[1] == X2.shape[1], 'dimensions do not match'

    n = X1.shape[0]
    m = X2.shape[0]
    # (X1 - X2)**2 = X1**2 + X2**2 - 2*X1*X2
    X1sq = SP.reshape((X1**2).sum(1), (n, 1))
    X2sq = SP.reshape((X2**2).sum(1), (m, 1))

    K = SP.tile(X1sq[:, 0], (m, 1)).T + SP.tile(X2sq[:, 0], (n, 1)) - 2 * SP.dot(X1, X2.T)
    return K
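A quick agreement check against scipy.spatial.distance.cdist, binding SP to numpy (which supplies every name sq_dist uses):

# --- agreement check with scipy.spatial ---
import numpy as SP
from scipy.spatial.distance import cdist

X1 = SP.random.randn(5, 3)
X2 = SP.random.randn(4, 3)
print(SP.allclose(sq_dist(X1, X2), cdist(X1, X2, 'sqeuclidean')))  # True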
Example #57
0
def SRIparams2iono(filename):

    fullfile = h5file(filename)
    fullfiledict = fullfile.readWholeh5file()

    #Size = Nrecords x Nbeams x Nranges x Nions+1 x 4 (fraction, temperature, collision frequency, LOS speed)
    fits = fullfiledict['/FittedParams']['Fits']
    (nt,nbeams,nrng,nspecs,nstuff) = fits.shape
    nlocs = nbeams*nrng
    fits = fits.transpose((1,2,0,3,4))
    fits = fits.reshape((nlocs,nt,nspecs,nstuff))
    #  Nrecords x Nbeams x Nranges
    Ne = fullfiledict['/FittedParams']['Ne']
    Ne = Ne.transpose((1,2,0))
    Ne = Ne.reshape((nlocs,nt))
    param_lists =sp.zeros((nlocs,nt,nspecs,2))
    param_lists[:,:,:,0] = fits[:,:,:,0]
    param_lists[:,:,:,1] = fits[:,:,:,1]
    param_lists[:,:,-1,0]=Ne
    Velocity = fits[:,:,0,3]


    # only single-ion (O+) files are handled; species/pnames are undefined otherwise
    if fullfiledict['/FittedParams']['IonMass'] == 16:
        species = ['O+', 'e-']
        pnames = sp.array([['Ni', 'Ti'], ['Ne', 'Te']])

    time = fullfiledict['/Time']['UnixTime']
    rng = fullfiledict['/FittedParams']['Range']
    bco = fullfiledict['/']['BeamCodes']
    angles = bco[:,1:3]
    (nang,nrg) = rng.shape

    allang = sp.tile(angles[:,sp.newaxis],(1,nrg,1))
    all_loc = sp.column_stack((rng.flatten(),allang.reshape(nang*nrg,2)))
    lkeep = ~ sp.any(sp.isnan(all_loc),1)
    all_loc = all_loc[lkeep]
    Velocity = Velocity[lkeep]
    param_lists = param_lists[lkeep]
    all_loc[:, 0] = all_loc[:, 0] * 1e-3  # convert range from m to km
    iono1 = IonoContainer(all_loc, param_lists, times=time, ver=1,
                          coordvecs=['r', 'theta', 'phi'],
                          paramnames=pnames, species=species, velocity=Velocity)

    # MSIS
    tn = fullfiledict['/MSIS']['Tn']
    tn = tn.transpose((1,2,0))
    tn = tn.reshape((nlocs,nt))
    
    
    startparams = sp.ones((nlocs,nt,2,2))
    startparams[:,:,0,1] = tn
    startparams[:,:,1,1] = tn
    startparams = startparams[lkeep]
    ionoS = IonoContainer(all_loc, startparams, times=time, ver=1,
                          coordvecs=['r', 'theta', 'phi'],
                          paramnames=pnames, species=species)
    return iono1,ionoS
Example #58
0
def connect_pores(network, pores1, pores2, labels=[], add_conns=True):
    r'''
    Returns the possible connections between two group of pores, and optionally
    makes the connections.

    Parameters
    ----------
    network : OpenPNM Network Object

    pores1 : array_like
        The first group of pores on the network

    pores2 : array_like
        The second group of pores on the network

    labels : list of strings
        The labels to apply to the new throats.  This argument is only needed
        if ``add_conns`` is True.

    add_conns : bool
        Indicates whether the connections should be added to the supplied
        network (default is True).  Otherwise, the connections are returned
        as an Nt x 2 array that can be passed directly to ``extend``.

    Notes
    -----
    It creates the connections in a format which is acceptable by
    the default OpenPNM connection ('throat.conns') and either adds them to
    the network or returns them.

    Examples
    --------
    >>> import OpenPNM
    >>> pn = OpenPNM.Network.TestNet()
    >>> pn.Nt
    300
    >>> pn.connect_pores(pores1=[22, 32], pores2=[16, 80, 68])
    >>> pn.Nt
    306
    >>> pn['throat.conns'][300:306]
    array([[16, 22],
           [22, 80],
           [22, 68],
           [16, 32],
           [32, 80],
           [32, 68]])

    '''
    size1 = _sp.size(pores1)
    size2 = _sp.size(pores2)
    array1 = _sp.repeat(pores1, size2)
    array2 = _sp.tile(pores2, size1)
    conns = _sp.vstack([array1, array2]).T
    if add_conns:
        extend(network=network, throat_conns=conns, labels=labels)
    else:
        return conns
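The repeat/tile pairing is what enumerates every pores1-pores2 combination; the core pattern in isolation (plain numpy):

# --- how repeat/tile enumerate all pore pairs ---
import numpy as np

pores1 = np.array([22, 32])
pores2 = np.array([16, 80, 68])
conns = np.vstack([np.repeat(pores1, pores2.size),
                   np.tile(pores2, pores1.size)]).T
print(conns.tolist())  # [[22, 16], [22, 80], [22, 68], [32, 16], [32, 80], [32, 68]]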