Code example #1
def simulate_data_v1(nCells = 5*10**4, nPersons = 40, seed = 123456, ratio_P =  [1., 1., 0.8, 0.1]):
	"""
		Simulates the data following the instruction presented in the article
	
	"""

	if seed is not None:
		npr.seed(seed)
		
		
		
	P = [0.49, 0.3, 0.2 , 0.01 ]
	Thetas = [np.array([0.,0, 0]), np.array([0, -2, 1]), np.array([1., 2, 0]), np.array([-2,2,1.5])]
	Z_Sigma  = [np.array([[1.27, 0.25, 0],[0.25, 0.27, -0.001],[0., -0.001, 0.001]]),
				np.array([[0.06, 0.04, -0.03],[0.04, 0.05, 0],[-0.03, 0., 0.09]]),
				np.array([[0.44, 0.08, 0.08],[0.08, 0.16, 0],[0.08, 0., 0.16]]),
				0.01*np.eye(3)]
	Sigmas = [0.1*np.eye(3), 0.1*spl.toeplitz([2.,0.5,0]),0.1* spl.toeplitz([2.,-0.5,1]),
			  0.1*spl.toeplitz([1.,.3,.3]) ] 
	
	nu = 100
		
	Y, act_Class, mus,  x = simulate_data_(Thetas, Z_Sigma, Sigmas, P, nu = nu, ratio_act = ratio_P, n_cells = nCells, n_persons = nPersons,
				seed = seed)
	
	
	return Y, act_Class, mus, Thetas, Sigmas, P
Code example #2
def X1B(xtrain,btrain,ytest,degre=10):
    """ Excercie 1 partie B"""
    
    # (RXX+RBB) h = rXX         
    rXX=correlate(xtrain,xtrain)
    rBB=correlate(btrain,btrain)
    
    # Ah=b
    A=scl.toeplitz(rXX[:degre])+scl.toeplitz(rBB[:degre])
    b=rXX[:degre]
    h=npl.inv(A).dot(b)
    
    # Estimate X by filtering Y with h
    xest=scipy.signal.lfilter(h,[1],ytest)
    
    plt.figure(1)
    plt.title('X1 B')
    plt.subplot(3,1,1)
    plt.ylabel('y')
    plt.plot(ytest,'b-')        
    plt.subplot(3,1,2)
    plt.ylabel('x estime')
    plt.plot(xest,'r-')
    plt.subplot(3,1,3)
    plt.ylabel('y, x estime')
    plt.plot(ytest,'b-')
    plt.plot(xest,'r-')    
    plt.savefig('X1B.pdf')
    plt.close()
    
    if inspection:
        global X1Bvars
        X1Bvars=inspect.currentframe().f_locals         
Code example #3
def test_mv(sim, d, n, mv, string):
	row = np.zeros(d)
	row2 = np.zeros(d)
	row[0] = 4.
	row2[0] = 2.
	if d > 1:
		row[1] = -1
		row2[1] = 1
	
	prior = {'mu':-10 * np.ones(d),'Sigma':spl.toeplitz(row)}
	param = {'Sigma': spl.toeplitz(row2)}
	Y = np.empty((n,d))
	L = np.linalg.cholesky(param['Sigma'])
	for i in range(n):  # @UnusedVariable
		Y[i,:] = np.dot(L,np.random.randn(d,1)).reshape((d,))
	dist = mv(prior = prior, param = param)  
	mu_est = np.zeros((sim,dist.mu_p.shape[0]))
	t0 = time.time()
	dist.set_data(Y)
	for i in range(sim):
		dist.set_parameter(param)
		dist.set_prior(prior)
		dist.set_data(Y)
		mu_est[i,:] = dist.sample()
	
	t1 = time.time()
	string += " %.4f msec/sim (sim, n ,d ) = (%d %d,%d) "%(1000*np.double(t1-t0)/sim, sim, n, d )
	print(string)
Code example #4
def main():

    
    print('Testing SVM class')
    height=3
    width=5
    samples=10
    #Create the D matrix
    D_w=toeplitz(np.hstack([1,np.zeros(width-2)]),np.hstack([1,-1, np.zeros(width-2)]))
    D_h=toeplitz(np.hstack([1,np.zeros(height-2)]),np.hstack([1,-1, np.zeros(height-2)]))
#    D=np.c_[D,np.zeros(width-1)]
#    D[width-2,width-1]=-1
    D2=np.kron(D_w,np.eye(height))
    D3=np.kron(np.eye(width),D_h)
    D=np.r_[D2,D3]
    
    
    w=np.random.randn(height*width)
    X=np.random.randn(samples,height*width)
    Y=np.random.randint(2,size=samples)
    Y[Y==0]=-1
    
    SHuber=SVMHuber(huber=0.5)
    print(w)
    print(X)
    print(SHuber.gradf(X,Y,w))
    print(SHuber.f(X,Y,w))
    
    print('Testing P class')
    KTVS=KtvSVM(D,huber=0.01,lambda1=1,lambda2=0.1,k=1)
    print(KTVS.f(X,Y,w))
    print(KTVS.gradf(X,Y,w))
Code example #5
def estimate_time_constant(Y, sn, p=None, lags=5, include_noise=False, pixels=None):
    """ 
    Estimate global time constants for the dataset Y through the autocovariance function (optional).
    The function is no longer used in the standard setting of the algorithm since every trace has its own
    time constant.
    Inputs:
    Y: np.ndarray (2D)
        input movie data with time in the last axis
    sn: np.ndarray
        pre-estimated noise standard deviation for each pixel
    p: positive integer
        order of AR process (must be passed explicitly; no default)
    lags: positive integer
        number of lags in the past to consider for determining time constants. Default 5
    include_noise: Boolean
        Flag to include pre-estimated noise value when determining time constants. Default: False
    pixels: np.ndarray
        Restrict estimation to these pixels (e.g., remove saturated pixels). Default: All pixels
        
    Output:
    g:  np.ndarray (p x 1) 
        Discrete time constants
    """
    if p is None:
        raise Exception("You need to define p")

    if pixels is None:
        pixels = np.arange(np.size(Y) // np.shape(Y)[-1])

    from scipy.linalg import toeplitz

    npx = len(pixels)
    g = 0
    lags += p
    XC = np.zeros((npx, 2 * lags + 1))
    for j in range(npx):
        XC[j, :] = np.squeeze(axcov(np.squeeze(Y[pixels[j], :]), lags))

    gv = np.zeros(npx * lags)
    if not include_noise:
        XC = XC[:, np.arange(lags - 1, -1, -1)]
        lags -= p

    A = np.zeros((npx * lags, p))
    for i in range(npx):
        if not include_noise:
            A[i * lags + np.arange(lags), :] = toeplitz(
                np.squeeze(XC[i, np.arange(p - 1, p + lags - 1)]), np.squeeze(XC[i, np.arange(p - 1, -1, -1)])
            )
        else:
            A[i * lags + np.arange(lags), :] = toeplitz(
                np.squeeze(XC[i, lags + np.arange(lags)]), np.squeeze(XC[i, lags + np.arange(p)])
            ) - (sn[i] ** 2) * np.eye(lags, p)
            gv[i * lags + np.arange(lags)] = np.squeeze(XC[i, lags + 1 :])

    if not include_noise:
        gv = XC[:, p:].T
        gv = np.squeeze(np.reshape(gv, (np.size(gv), 1), order="F"))

    g = np.dot(np.linalg.pinv(A), gv)

    return g
Code example #6
def estimate_time_constant(Y, sn, p = 2, lags = 5, include_noise = False, pixels = None):
        
    if pixels is None:
        pixels = np.arange(np.size(Y) // np.shape(Y)[-1])
    
    from scipy.linalg import toeplitz    
    
    npx = len(pixels)
    g = 0
    lags += p
    XC = np.zeros((npx,2*lags+1))
    for j in range(npx):
        XC[j,:] = np.squeeze(axcov(np.squeeze(Y[pixels[j],:]),lags))
        
    gv = np.zeros(npx*lags)
    if not include_noise:
        XC = XC[:,np.arange(lags-1,-1,-1)]
        lags -= p
        
    A = np.zeros((npx*lags,p))
    for i in range(npx):
        if not include_noise:
            A[i*lags+np.arange(lags),:] = toeplitz(np.squeeze(XC[i,np.arange(p-1,p+lags-1)]),np.squeeze(XC[i,np.arange(p-1,-1,-1)])) 
        else:
            A[i*lags+np.arange(lags),:] = toeplitz(np.squeeze(XC[i,lags+np.arange(lags)]),np.squeeze(XC[i,lags+np.arange(p)])) - (sn[i]**2)*np.eye(lags,p)
            gv[i*lags+np.arange(lags)] = np.squeeze(XC[i,lags+1:])
        
    if not include_noise:
        gv = XC[:,p:].T
        gv = np.squeeze(np.reshape(gv,(np.size(gv),1),order='F'))
        
    g = np.dot(np.linalg.pinv(A),gv)
    
    return g
Code example #7
def simulate_data(nCells = 5*10**4, nPersons = 40, seed = 123456, ratio_P =  [1., 1., 0.8, 0.1]):
	"""
		Simulates the data following the instruction presented in the article
	
	"""

	if seed is not None:
		npr.seed(seed)
		
		
	nClass = 4
	dim    = 3
	P = [0.49, 0.3, 0.2 , 0.01 ]
	Thetas = [np.array([0.,0, 0]), np.array([0, -2, 1]), np.array([1., 2, 0]), np.array([-2,2,1.5])]
	Z_Sigma  = [np.array([[1.27, 0.25, 0],[0.25, 0.27, -0.001],[0., -0.001, 0.001]]),
			    np.array([[0.06, 0.04, -0.03],[0.04, 0.05, 0],[-0.03, 0., 0.09]]),
			    np.array([[0.44, 0.08, 0.08],[0.08, 0.16, 0],[0.08, 0., 0.16]]),
			    0.01*np.eye(3)]
	Sigmas = [0.1*np.eye(3), 0.1*spl.toeplitz([2.,0.5,0]),0.1* spl.toeplitz([2.,-0.5,1]),
			  0.1*spl.toeplitz([1.,.3,.3]) ] 
	

		
	act_Class = np.zeros((nPersons,4))
	for i in range(nClass):
		act_Class[:int(np.ceil(nPersons*ratio_P[i])), i] = 1.
	Y = []
	
	nu  = 100
	mus = []
	for i in range(nPersons):
		mix_obj = GMM.mixture(K = int(np.sum(act_Class[i, :])))
		theta_temp  = []
		sigma_temp  = []
		for j in range(nClass):
			if act_Class[i, j] == 1:
				theta_temp.append(Thetas[j] +  npr.multivariate_normal(np.zeros(3), Z_Sigma[j]))
				sigma_temp.append(wishart.invwishartrand(nu, (nu - dim - 1)* Sigmas[j]))
			else:
				theta_temp.append(np.ones(dim)*np.nan)
				sigma_temp.append(np.ones((dim,dim))*np.nan)
		theta_temp_ = [  theta_temp[aC] for aC in np.where(act_Class[i, :] == 1)[0]]
		sigma_temp_ = [  sigma_temp[aC] for aC in np.where(act_Class[i, :] == 1)[0]]

		mix_obj.mu = theta_temp_
		mus.append(theta_temp)
		mix_obj.sigma = sigma_temp_
		
		
		p_ = np.array([ (0.2*np.random.rand()+0.9) * P[aC]  for aC in np.where(act_Class[i, :] == 1)[0]]  )
		p_ /= np.sum(p_)
		mix_obj.p = p_
		mix_obj.d = dim
		Y.append(mix_obj.simulate_data(nCells))
	mus = np.array(mus)
	return Y, act_Class, mus.T, Thetas, Sigmas, P
Code example #8
File: nested_maest.py Project: creasyw/hopcs
def maestx(y, pcs, q, norder=3,samp_seg=1,overlap=0):
    """
    MAEST  MA parameter estimation via the GM-RCLS algorithm, with Tugnait's fix
        y  - time-series (vector or matrix)
        q  - MA order
        norder - cumulant-order to use  [default = 3]
        samp_seg - samples per segment for cumulant estimation
                  [default: length of y]
        overlap - percentage overlap of segments  [default = 0]
        flag - 'biased' or 'unbiased'          [default = 'biased']
        Return: estimated MA parameter vector
    """
    assert norder>=2 and norder<=4, "Cumulant order must be 2, 3, or 4!"
    nsamp = len(y)
    overlap = max(0, min(overlap,99))

    c2 = cumx(y, pcs, 2,q, samp_seg, overlap)
    c2 = np.hstack((c2, np.zeros(q)))
    cumd = cumx(y, pcs, norder,q,samp_seg,overlap,0,0)[::-1]
    cumq = cumx(y, pcs, norder,q,samp_seg,overlap,q,q)
    cumd = np.hstack((cumd, np.zeros(q)))
    cumq[:q] = np.zeros(q)

    cmat = toeplitz(cumd, np.hstack((cumd[0],np.zeros(q))))
    rmat = toeplitz(c2,   np.hstack((c2[0],np.zeros(q))))
    amat0 = np.hstack((cmat, -rmat[:,1:q+1]))
    rvec0 = c2

    cumq = np.hstack((cumq[2*q:q-1:-1], np.zeros(q)))
    cmat4 = toeplitz(cumq, np.hstack((cumq[0],np.zeros(q))))
    c3   = cumd[:2*q+1]
    amat0 = np.vstack((np.hstack((amat0, np.zeros((3*q+1,1)))), \
            np.hstack((np.hstack((np.zeros((2*q+1,q+1)), cmat4[:,1:q+1])), \
            np.reshape(-c3,(len(c3),1))))))
    rvec0 = np.hstack((rvec0, -cmat4[:,0]))

    row_sel = list(range(q)) + list(range(2*q+1, 3*q+1)) + list(range(3*q+1, 4*q+1)) + list(range(4*q+2, 5*q+2))
    amat0 = amat0[row_sel,:]
    rvec0 = rvec0[row_sel]

    bvec = lstsq(amat0, rvec0)[0]
    b1 = bvec[1:q+1]/bvec[0]
    b2 = bvec[q+1:2*q+1]
    if norder == 3:
        if all(b2 > 0):
            b1 = np.sign(b1) * np.sqrt(0.5*(b1**2 + b2))
        else:
            print('MAEST: alternative solution b1 used')
    else:
        b1 = np.sign(b2)* (abs(b1) + abs(b2)**(1./3))/2
#        if all(np.sign(b2) == np.sign(b1)):
#            b1 = np.sign(b1)* (abs(b1) + abs(b2)**(1./3))/2
#        else:
#            print 'MAEST: alternative solution b1 used'
    return np.hstack(([1], b1))
Code example #9
def kern2mat(H, size):
    """Create a matrix corresponding to the application of the convolution kernel H.
    
    The size argument should be a tuple (output_size, input_size).
    """
    N = H.size
    half = int((N - 1) / 2)
    Nout, Mout = size
    if Nout == Mout:
        return linalg.toeplitz(np.r_[H[half:], np.zeros(Nout - half)], np.r_[H[half:], np.zeros(Mout-half)])
    else:
        return linalg.toeplitz(np.r_[H, np.zeros(Nout - N)], np.r_[H[-1], np.zeros(Mout-1)])
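For reference, here is a minimal, self-contained sketch (an illustration, not code from the project above) of the standard identity kern2mat relies on: full discrete convolution equals multiplication by a Toeplitz matrix whose first column is the kernel padded with zeros.

import numpy as np
from scipy.linalg import toeplitz

h = np.array([1., 2., 3.])                # convolution kernel
x = np.array([4., 5., 6., 7.])            # input signal
col = np.r_[h, np.zeros(len(x) - 1)]      # first column: kernel, then zeros
row = np.r_[h[0], np.zeros(len(x) - 1)]   # first row: h[0], then zeros
T = toeplitz(col, row)                    # shape (len(h)+len(x)-1, len(x))
assert np.allclose(T @ x, np.convolve(h, x))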
Code example #10
File: regression.py Project: Garyfallidis/nipy
    def _setup_bias_correct(self, model):

        R = np.identity(model.design.shape[0]) - np.dot(model.design, model.calc_beta)
        M = np.zeros((self.p+1,)*2)
        I = np.identity(R.shape[0])

        for i in range(self.p+1):
            Di = np.dot(R, toeplitz(I[i]))
            for j in range(self.p+1):
                Dj = np.dot(R, toeplitz(I[j]))
                M[i,j] = np.diagonal((np.dot(Di, Dj))/(1.+(i>0))).sum()
                    
        self.invM = L.inv(M)
        return
Code example #11
File: xdawn.py Project: vwyart/mne-python
def _least_square_evoked(data, events, event_id, tmin, tmax, sfreq):
    """Least square estimation of evoked response from data.

    Parameters
    ----------
    data : ndarray, shape (n_channels, n_times)
        The data from which to estimate the evoked responses.
    events : ndarray, shape (n_events, 3)
        The events typically returned by the read_events function.
        If some events don't match the events of interest as specified
        by event_id, they will be ignored.
    event_id : dict
        The id of the events to consider
    tmin : float
        Start time before event.
    tmax : float
        End time after event.
    sfreq : float
        Sampling frequency.

    Returns
    -------
    evokeds_data : dict of ndarray
        A dict of evoked data for each event type in event_id.
    toeplitz : dict of ndarray
        A dict of toeplitz matrix for each event type in event_id.
    """
    nmin = int(tmin * sfreq)
    nmax = int(tmax * sfreq)

    window = nmax - nmin
    n_samples = data.shape[1]
    toeplitz_mat = dict()
    full_toep = list()
    for eid in event_id:
        # select events by type
        ix_ev = events[:, -1] == event_id[eid]

        # build toeplitz matrix
        trig = np.zeros((n_samples, 1))
        ix_trig = (events[ix_ev, 0]) + nmin
        trig[ix_trig] = 1
        toep_mat = linalg.toeplitz(trig[0:window], trig)
        toeplitz_mat[eid] = toep_mat
        full_toep.append(toep_mat)

    # Concatenate toeplitz
    full_toep = np.concatenate(full_toep)

    # least square estimation
    predictor = np.dot(linalg.pinv(np.dot(full_toep, full_toep.T)), full_toep)
    all_evokeds = np.dot(predictor, data.T)
    all_evokeds = np.vsplit(all_evokeds, len(event_id))

    # parse evoked response
    evoked_data = dict()
    for idx, eid in enumerate(event_id):
        evoked_data[eid] = all_evokeds[idx].T

    return evoked_data, toeplitz_mat
Code example #12
def yule_walker_acov(acov, order=1, method="unbiased", df=None, inv=False):
    """
    Estimate AR(p) parameters from acovf using Yule-Walker equation.


    Parameters
    ----------
    acov : array-like, 1d
        auto-covariance
    order : integer, optional
        The order of the autoregressive process.  Default is 1.
    inv : bool
        If inv is True the inverse of R is also returned.  Default is False.

    Returns
    -------
    rho : ndarray
        The estimated autoregressive coefficients
    sigma
        TODO
    Rinv : ndarray
        inverse of the Toeplitz matrix

    """


    r = np.asarray(acov)
    R = toeplitz(r[:-1])

    rho = np.linalg.solve(R, r[1:])
    sigmasq = r[0] - (r[1:]*rho).sum()
    if inv:
        return rho, np.sqrt(sigmasq), np.linalg.inv(R)
    else:
        return rho, np.sqrt(sigmasq)
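As a quick sanity check of the Yule-Walker solve above, a standalone sketch (values are illustrative): for the exact autocovariance of an AR(1) process with coefficient 0.6, the solve recovers [0.6, 0, 0] and an innovation variance of 1 - 0.6**2.

import numpy as np
from scipy.linalg import toeplitz

r = 0.6 ** np.arange(4)              # exact AR(1) autocovariance, unit variance
R = toeplitz(r[:-1])
rho = np.linalg.solve(R, r[1:])
sigmasq = r[0] - (r[1:] * rho).sum()
print(np.round(rho, 6), sigmasq)     # [0.6 0.  0. ] 0.64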
Code example #13
def X1A(xtrain,ytrain,ytest,degre=10):
    """ Excercie 1 partie A"""
    
    # RYY h = rXY     
    rYY=correlate(ytrain,ytrain)
    rXY=correlate(xtrain,ytrain)
    
    # Ah=b
    A=scl.toeplitz(rYY[:degre])
    b=rXY[:degre]
    h=npl.inv(A).dot(b)
    
    # Estimate X by filtering Y with h
    xest=scipy.signal.lfilter(h,[1],ytest)
    
    plt.figure(1)
    plt.title('X1 A')
    plt.subplot(3,1,1)
    plt.ylabel('y')
    plt.plot(ytest,'b-')        
    plt.subplot(3,1,2)
    plt.ylabel('x estime')
    plt.plot(xest,'r-')
    plt.subplot(3,1,3)
    plt.ylabel('y, x estime')
    plt.plot(ytest,'b-')
    plt.plot(xest,'r-')    
    plt.savefig('X1A.pdf')
    plt.close()
    
    if inspection:
        global X1Avars
        X1Avars=inspect.currentframe().f_locals
Code example #14
File: xdawn.py Project: annapasca/mne-python
def _least_square_evoked(epochs_data, events, tmin, sfreq):
    """Least square estimation of evoked response from epochs data.

    Parameters
    ----------
    epochs_data : array, shape (n_channels, n_times)
        The epochs data to estimate evoked.
    events : array, shape (n_events, 3)
        The events typically returned by the read_events function.
        If some events don't match the events of interest as specified
        by event_id, they will be ignored.
    tmin : float
        Start time before event.
    sfreq : float
        Sampling frequency.

    Returns
    -------
    evokeds : array, shape (n_class, n_components, n_times)
        A concatenated array of evoked data for each event type.
    toeplitz : array, shape (n_class * n_components, n_channels)
        A concatenated array of Toeplitz matrices for each event type.
    """

    n_epochs, n_channels, n_times = epochs_data.shape
    tmax = tmin + n_times / float(sfreq)

    # Deal with shuffled epochs
    events = events.copy()
    events[:, 0] -= events[0, 0] + int(tmin * sfreq)

    # Construct raw signal
    raw = _construct_signal_from_epochs(epochs_data, events, sfreq, tmin)

    # Compute the independent evoked responses per condition, while correcting
    # for event overlaps.
    n_min, n_max = int(tmin * sfreq), int(tmax * sfreq)
    window = n_max - n_min
    n_samples = raw.shape[1]
    toeplitz = list()
    classes = np.unique(events[:, 2])
    for ii, this_class in enumerate(classes):
        # select events by type
        sel = events[:, 2] == this_class

        # build toeplitz matrix
        trig = np.zeros((n_samples, 1))
        ix_trig = (events[sel, 0]) + n_min
        trig[ix_trig] = 1
        toeplitz.append(linalg.toeplitz(trig[0:window], trig))

    # Concatenate toeplitz
    toeplitz = np.array(toeplitz)
    X = np.concatenate(toeplitz)

    # least square estimation
    predictor = np.dot(linalg.pinv(np.dot(X, X.T)), X)
    evokeds = np.dot(predictor, raw.T)
    evokeds = np.transpose(np.vsplit(evokeds, len(classes)), (0, 2, 1))
    return evokeds, toeplitz
Code example #15
	def setUp(self):
		npr.seed(123456)
		self.nClass = 3
		self.dim    = 3
		self.P = [0.4, 0.3, 0.3]
		self.Thetas = [np.array([0.,0, 0]), np.array([0., -2, 1]), np.array([1., 2, 0])]
		self.Sigmas = [ 0.1*spl.toeplitz([2.,0.5,0]),0.1* spl.toeplitz([2.,-0.5,1]),
			  0.1*spl.toeplitz([1.,.3,.3]) ] 
		
		mix_obj = mixP(K = self.nClass)
		mix_obj.mu    = cp.deepcopy(self.Thetas)
		mix_obj.sigma = cp.deepcopy(self.Sigmas)
		
		mix_obj.p = cp.deepcopy(self.P)
		mix_obj.d = self.dim
		self.Y = mix_obj.simulate_data(self.n)
Code example #16
File: cov_struct.py Project: Bonfils-ebu/statsmodels
    def covariance_matrix_grid(self, endog_expval, index):

        from scipy.linalg import toeplitz
        r = np.zeros(len(endog_expval))
        r[0] = 1
        r[1:self.max_lag + 1] = self.dep_params
        return toeplitz(r), True
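A standalone illustration of what this method builds, assuming hypothetical values max_lag = 2 and dep_params = [0.5, 0.25]: toeplitz(r) expands the lag-dependence vector into a banded stationary correlation matrix.

import numpy as np
from scipy.linalg import toeplitz

r = np.zeros(5)
r[0] = 1
r[1:3] = [0.5, 0.25]   # hypothetical dep_params, max_lag = 2
print(toeplitz(r))
# [[1.   0.5  0.25 0.   0.  ]
#  [0.5  1.   0.5  0.25 0.  ]
#  [0.25 0.5  1.   0.5  0.25]
#  [0.   0.25 0.5  1.   0.5 ]
#  [0.   0.   0.25 0.5  1.  ]]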
Code example #17
def pacf(x, periodogram=True, lagmax=None):
    """Computes the partial autocorrelation function of series `x` along
    the given axis.

:Parameters:
    x : 1D array
        Time series.
    periodogram : {True, False} optional
        Whether to use a periodogram-like estimate of the ACF or not.
    lagmax : {None, int} optional
        Maximum lag. If None, the maximum lag is set to n/4+1, with n the series
        length.
    """
    acfx = acf(x, periodogram)[:,None]
    #
    if lagmax is None:
        n = len(x) // 4 + 1
    else:
        n = min(lagmax, len(x))
    #
    arkf = np.zeros((n,n),float)
    arkf[1,1] = acfx[1,0]
    for k in range(2,n):
        res = solve(toeplitz(acfx[:k]), acfx[1:k+1]).squeeze()
        arkf[k,1:k+1] = res
    return arkf.diagonal()
Code example #18
File: ssa_root.py Project: konggas/ssa-py
def ssaeig(x, M):
    """Syntax: [E,V,C]=ssaeig(x, M);
    This function starts an SSA of series 'x', for embedding dimension 'M'.
    Returns:    E - eigenfunction matrix in standard form
                     (columns are the eigenvectors, or T-EOFs)
                V - vector containing variances (unnormalized eigenvalues)
                C - Covariance Matrix
                E and V are ordered from large to small.
    See section 2 of Vautard, Yiou, and Ghil, Physica D 58, 95-126, 1992."""
    from scipy.linalg import toeplitz, eigh
    N=size(x)
    if not isinstance(N, int):
        if N[0] < N[1]: x = transpose(x); N = size(x)
        else:   raise ValueError('Hey! Vectors only!')
    if M-1 >= N:  raise ValueError('Hey! Too big a lag!')

    acov=ac(x, M-1)            # calculate autocovariance estimates
    Tc = toeplitz(acov)        # create Toeplitz matrix (trajectory matrix)
    C = Tc
    L,E = eigh(Tc)          # calculate eigenvectors, values of T
    V = abs(L)              # create eigenvalue vector
    ind = argsort(V)        # sort eigenvalues
    ind = ind[M::-1]
    V = V[ind]
    E = E[:][:,ind]         # sort eigenvectors
    return [E,V,C]
Code example #19
File: lin_to_matrix.py Project: Aharobot/cvxpy
def conv_mat(lin_op):
    """Returns the coefficient matrix for CONV linear op.

    Parameters
    ----------
    lin_op : LinOp
        The conv linear op.

    Returns
    -------
    list of NumPy matrix
        The matrix representing the convolution operation.
    """
    constant = const_mat(lin_op.data)
    # Cast to 1D.
    constant = intf.from_2D_to_1D(constant)

    # Create a Toeplitz matrix with constant as columns.
    rows = lin_op.size[0]
    nonzeros = lin_op.data.size[0]
    toeplitz_col = np.zeros(rows)
    toeplitz_col[0:nonzeros] = constant

    cols = lin_op.args[0].size[0]
    toeplitz_row = np.zeros(cols)
    toeplitz_row[0] = constant[0]
    coeff = sp_la.toeplitz(toeplitz_col, toeplitz_row)

    return [np.matrix(coeff)]
Code example #20
File: arima_process.py Project: NCTA/statsmodels
def arma_pacf(ar, ma, nobs=10):
    '''partial autocorrelation function of an ARMA process

    Parameters
    ----------
    ar : array_like, 1d
        coefficient for autoregressive lag polynomial, including zero lag
    ma : array_like, 1d
        coefficient for moving-average lag polynomial, including zero lag
    nobs : int
        number of terms (lags plus zero lag) to include in returned pacf

    Returns
    -------
    pacf : array
        partial autocorrelation of ARMA process given by ar, ma

    Notes
    -----
    solves yule-walker equation for each lag order up to nobs lags

    not tested/checked yet
    '''
    apacf = np.zeros(nobs)
    acov = arma_acf(ar,ma, nobs=nobs+1)

    apacf[0] = 1.
    for k in range(2,nobs+1):
        r = acov[:k]
        apacf[k-1] = linalg.solve(linalg.toeplitz(r[:-1]), r[1:])[-1]
    return apacf
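A small self-contained check of the same recursion (assuming the exact autocorrelation of an AR(1) process with coefficient 0.6): the PACF should be 1 at lag 0, 0.6 at lag 1, and 0 at higher lags.

import numpy as np
from scipy import linalg

acov = 0.6 ** np.arange(6)     # ACF of an AR(1) with coefficient 0.6
apacf = [1.0]
for k in range(2, 6):
    r = acov[:k]
    apacf.append(linalg.solve(linalg.toeplitz(r[:-1]), r[1:])[-1])
print(np.round(apacf, 6))      # [1.  0.6 0.  0.  0. ]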
Code example #21
File: test_blas.py Project: BranYang/scipy
    def test_gbmv(self):
        seed(1234)
        for ind, dtype in enumerate(DTYPES):
            n = 7
            m = 5
            kl = 1
            ku = 2
            # fake a banded matrix via toeplitz
            A = toeplitz(append(rand(kl+1), zeros(m-kl-1)),
                         append(rand(ku+1), zeros(n-ku-1)))
            A = A.astype(dtype)
            Ab = zeros((kl+ku+1, n), dtype=dtype)

            # Form the banded storage
            Ab[2, :5] = A[0, 0]  # diag
            Ab[1, 1:6] = A[0, 1]  # sup1
            Ab[0, 2:7] = A[0, 2]  # sup2
            Ab[3, :4] = A[1, 0]  # sub1

            x = rand(n).astype(dtype)
            y = rand(m).astype(dtype)
            alpha, beta = dtype(3), dtype(-5)

            func, = get_blas_funcs(('gbmv',), dtype=dtype)
            y1 = func(m=m, n=n, ku=ku, kl=kl, alpha=alpha, a=Ab,
                      x=x, y=y, beta=beta)
            y2 = alpha * A.dot(x) + beta * y
            assert_array_almost_equal(y1, y2)
Code example #22
File: synchronizer_v2.py Project: icopavan/gr-burst
 def determineOptimalFilter(self, x):
     # performs the direct solution (Wiener-Hopf solution)
     # to determine optimal filter weights for equalization (optimal w/ respect to MMSE)
     # this function expects data @ 1sps, not 2 sps
     # if the data is available @ a higher samples/sym than 1,
     # it is OK to decimate the data and pass it into this block because
     # the equalization should take care of some of the timing.  Of course,
     # if the sps is 4 or greater, something smarter could probably be done
     # than just throwing data away, so think about that
     numTrainingSyms = len(self.preSyms) 
     x_n = x[0:numTrainingSyms]       # slice the appropriate preamble data
     
     # generate the input correlation matrix
     m = numTrainingSyms       
     X = numpy.fft.fft(x_n, 128)  # the FFT Size is 128 - this will change if the preamble length changes, so beware! 
     X_magSq = numpy.square(numpy.absolute(X))
     rxx = numpy.fft.ifft(X_magSq)
     rxx = rxx/m
     
     toeplitzMatCol = rxx[0:m]
     R = linalg.toeplitz(toeplitzMatCol)
     
     # generate the P vector
     xc = numpy.correlate(self.preSyms, x_n, 'full')
     P = xc[0:m]
             
     w = numpy.linalg.solve(R,P)
     w /= numpy.amax(numpy.absolute(w))      # scale the filter coefficients
     
     return w
Code example #23
File: GPToeplitz.py Project: nealegibson/GeePea
def CovarianceMatrixBlockToeplitzMult(theta,X,Y,ToeplitzKernel,mf,mf_pars,mf_args_pred,mf_args):
  """
  X - input matrix (q x D) - of training points
  Y - input matrix (n x D) - of predictive points
  theta - hyperparameter array/list
  K - (q x n) covariance matrix block

  Note that this only works when the step sizes are the same for X and Y; the
  Toeplitz kernel is usually 1D!

  """

  a = ToeplitzKernel(X,Y,theta,white_noise=False) #length q
  b = ToeplitzKernel(Y,X,theta,white_noise=False) #length n

  #return q x n matrix block
  K = LA.toeplitz(a,b)
  
  #get mean function
  m = mf(mf_pars,mf_args) * np.ones(Y.shape[0])
  #and predictive mean
  ms = mf(mf_pars,mf_args_pred) * np.ones(X.shape[0])
  #and calculate the affine transform
  Kp = np.diag(ms) * np.matrix(K) * np.diag(m)
  
  return np.matrix(Kp)
Code example #24
File: Tools.py Project: MPBA/pyHRV
 def aryw(x, order=30):
     x = x - _np.mean(x)
     ac = autocorr(x, order+1)
     R = _linalg.toeplitz(ac[:order])
     r = ac[1:order+1]
     params = _np.linalg.inv(R).dot(r)
     return(params)
Code example #25
File: demo_image.py Project: syarra/nlpy
    def __init__(self, n=80, sig=0.05, err=2, **kwargs):
        self.n = n  # Number of grid points
        self.sig = float(sig)  # Gaussian kernel width
        self.err = float(err) / 100  # Percent error in data

        # Setup grid
        h = 1.0 / n
        z = np.arange(h / 2, 1 - h / 2 + h, h)

        # Compute nxn matrix K = convolution with Gaussian kernel
        gaussKernel = 1 / sqrt(np.pi) / self.sig * np.exp(-(z - h / 2) ** 2 / self.sig ** 2)
        self.K = h * np.matrix(toeplitz(gaussKernel))

        # Setup true solution, blurred and noisy data
        trueimg = 0.75 * np.where((0.1 < z) & (z < 0.25), 1, 0)
        trueimg += 0.25 * np.where((0.3 < z) & (z < 0.32), 1, 0)
        trueimg += np.where((0.5 < z) & (z < 1), 1, 0) * np.sin(2 * np.pi * z) ** 4
        blurred = self.K * np.asmatrix(trueimg).T
        blurred = np.asarray(blurred.T)[0]  # np.matrix messes up your data
        noise = self.err * np.linalg.norm(blurred) * np.random.random(n) / sqrt(n)
        self.data = blurred + noise
        self.z = z
        self.trueimg = trueimg
        self.blurred = blurred
        self.setsolver()
Code example #26
File: chebfun.py Project: psammetichus/pychebfun
 def roots(self):
     """
     Utilises Boyd's O(n^2) recursive subdivision algorithm. The chebfun
     is recursively subsampled until it is successfully represented to 
     machine precision by a sequence of piecewise interpolants of degree
     100 or less. A colleague matrix eigenvalue solve is then applied to 
     each of these pieces and the results are concatenated.
     
     See: 
     J. P. Boyd, Computing zeros on a real interval through Chebyshev 
     expansion and polynomial rootfinding, SIAM J. Numer. Anal., 40 (2002), 
     pp. 1666–1682.
     """
     if self.size() <= 100:  
         ak = self.coefficients()
         v = np.zeros_like(ak[:-1])
         v[1] = 0.5
         C1 = linalg.toeplitz(v) 
         C2 = np.zeros_like(C1)
         C1[0,1] = 1.
         C2[-1,:] = ak[:-1]
         C = C1 - .5/ak[-1] * C2
         eigenvalues = linalg.eigvals(C) 
         roots = [eig.real for eig in eigenvalues
                 if np.allclose(eig.imag,0,atol=1e-10) 
                     and np.abs(eig.real) <=1]
         scaled_roots = self._ui_to_ab(np.array(roots))
         return scaled_roots
     else:
         # divide at a close-to-zero split-point
         split_point = self._ui_to_ab(0.0123456789)     
         return np.concatenate(
             (self.restrict([self._domain[0],split_point]).roots(),
              self.restrict([split_point,self._domain[1]]).roots())
         )
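A minimal standalone sketch of the colleague-matrix step above, assuming the chebfun is the Chebyshev polynomial T_2 (coefficient vector ak = [0, 0, 1]); its roots are +/-1/sqrt(2), about +/-0.7071.

import numpy as np
from scipy import linalg

ak = np.array([0., 0., 1.])     # Chebyshev coefficients of T_2(x) = 2x^2 - 1
v = np.zeros_like(ak[:-1])
v[1] = 0.5
C1 = linalg.toeplitz(v)
C1[0, 1] = 1.
C2 = np.zeros_like(C1)
C2[-1, :] = ak[:-1]
C = C1 - .5 / ak[-1] * C2
print(np.sort(linalg.eigvals(C).real))   # ~[-0.7071  0.7071]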
Code example #27
File: agtek.py Project: mpastell/Agtek481_2013
def aryw(x, p=5):
    x = x - mean(x)
    ac = autocorr(x, p+1)
    R = linalg.toeplitz(ac[:p])
    r = ac[1:p+1]
    params = inv(R).dot(r)
    return(params)
Code example #28
File: tests.py Project: dfm/ski
def test_toeplitz(N=50):
    print("Testing circulant linear algebra...")
    x = np.linspace(0, 10, N)
    y = np.vstack((np.sin(x), np.cos(x), x, x**2)).T
    c_row = np.exp(-0.5 * x ** 2)
    c_row[0] += 0.1
    cnum = circulant(c_row)
    cmat = CirculantMatrix(c_row)

    # Test dot products.
    assert np.allclose(np.dot(cnum, y[:, 0]), cmat.dot(y[:, 0]))
    assert np.allclose(np.dot(cnum, y), cmat.dot(y))

    # Test solves.
    assert np.allclose(np.linalg.solve(cnum, y[:, 0]), cmat.solve(y[:, 0]))
    assert np.allclose(np.linalg.solve(cnum, y), cmat.solve(y))

    # Test eigenvalues.
    ev = np.linalg.eigvals(cnum)
    ev = ev[np.argsort(np.abs(ev))[::-1]]
    assert np.allclose(np.abs(cmat.eigvals()), np.abs(ev))

    print("Testing Toeplitz linear algebra...")
    tnum = toeplitz(c_row)
    tmat = ToeplitzMatrix(c_row)

    # Test dot products.
    assert np.allclose(np.dot(tnum, y[:, 0]), tmat.dot(y[:, 0]))
    assert np.allclose(np.dot(tnum, y), tmat.dot(y))

    # Test solves.
    assert np.allclose(np.linalg.solve(tnum, y[:, 0]),
                       tmat.solve(y[:, 0], tol=1e-12, verbose=True))
    assert np.allclose(np.linalg.solve(tnum, y),
                       tmat.solve(y, tol=1e-12, verbose=True))
Code example #29
File: bispectrum.py Project: pabell/stingray
    def _cumulant3(self):
        """
            Calculates the 3rd order cumulant of the lightcurve.
            Assigns: 
            self.cum3, 
            self.lags
        """
        # Initialize square cumulant matrix of zeros
        cum3_dim = 2 * self.maxlag + 1
        self.cum3 = np.zeros((cum3_dim, cum3_dim))

        # calculate lags for different values of 3rd order cumulant
        lagindex = np.arange(-self.maxlag, self.maxlag + 1)
        self.lags = lagindex * self.lc.dt

        # Defines indices for matrices
        ind = np.arange((self.n - self.maxlag) - 1, self.n)
        ind_t = np.arange(self.maxlag, self.n)
        zero_maxlag = np.zeros((1, self.maxlag))
        zero_maxlag_t = zero_maxlag.transpose()

        sig = self.signal.transpose()

        rev_signal = np.array([self.signal[0][::-1]])
        col = np.concatenate((sig[ind], zero_maxlag_t), axis=0)
        row = np.concatenate((rev_signal[0][ind_t], zero_maxlag[0]), axis=0)

        # converts row and column into a toeplitz matrix
        toep = toeplitz(col, row)
        rev_signal = np.repeat(rev_signal, [2 * self.maxlag + 1], axis=0)

        # Calculates cumulant of the 1D signal, i.e. lightcurve counts
        self.cum3 = self.cum3 + np.matmul(np.multiply(toep, rev_signal), toep.transpose())
Code example #30
def firls(m, bands, desired, weight=None):

    if weight is None: weight = ones(len(bands) // 2)
    bands, desired, weight = array(bands), array(desired), array(weight)

    #if not desired[-1] == 0 and bands[-1] == 1 and m % 2 == 1:
    if m % 2 == 1:
        m = m + 1

    M = m // 2
    w = kron(weight, [-1,1])
    omega = bands * pi
    i1 = arange(1,M+1)

    # generate the matrix q
    # as illustrated in the above-cited reference, the matrix can be
    # expressed as the sum of a hankel and toeplitz matrix. a factor of
    # 1/2 has been dropped and the final filter coefficients multiplied
    # by 2 to compensate.
    cos_ints = append(omega, sin(mat(arange(1,m+1)).T*mat(omega))).reshape((-1,omega.shape[0]))
    q = append(1, 1.0/arange(1.0,m+1)) * array(mat(cos_ints) * mat(w).T).T[0]
    q = toeplitz(q[:M+1]) + hankel(q[:M+1], q[M : ])

    # the vector b is derived from solving the integral:
    #
    #           _ w
    #          /   2
    #  b  =   /       w(w) d(w) cos(kw) dw
    #   k    /    w
    #       -      1
    #
    # since we assume that w(w) is constant over each band (if not, the
    # computation of q above would be considerably more complex), but
    # d(w) is allowed to be a linear function, in general the function
    # w(w) d(w) is linear. the computations below are derived from the
    # fact that:
    #     _
    #    /                          a              ax + b
    #   /   (ax + b) cos(nx) dx =  --- cos (nx) +  ------ sin(nx)
    #  /                             2                n
    # -                             n
    #


    enum = append(omega[::2]**2 - omega[1::2]**2, cos(mat(i1).T * mat(omega[1::2])) - cos(mat(i1).T * mat(omega[::2]))).flatten()
    deno = mat(append(2, i1)).T * mat(omega[1::2] - omega[::2])
    cos_ints2 = enum.reshape(deno.shape)/array(deno)

    d = zeros_like(desired)
    d[::2]  = -weight * desired[::2]
    d[1::2] =  weight * desired[1::2]

    b = append(1, 1.0/i1) * array(mat(kron (cos_ints2, [1, 1]) + cos_ints[:M+1,:]) * mat(d).T)[:,0]

    # having computed the components q and b of the matrix equation,
    # solve for the filter coefficients.
    a = (array(inv(q)*mat(b).T).T)[0]
    h = append( a[:0:-1], append(2*a[0],  a[1:]))

    return h
Code example #31
def tiny2zero(x, eps=1e-15):
    # NOTE: function header assumed; the snippet starts mid-function
    mask = np.abs(x.copy()) < eps
    x[mask] = 0
    return x


def maxabs(x):
    return np.max(np.abs(x))


if __name__ == '__main__':

    n = 5
    y = np.arange(n)
    x = np.random.randn(100, n)
    autocov = 2 * 0.8**np.arange(n) + 0.01 * np.random.randn(n)
    sigma = linalg.toeplitz(autocov)

    mat = PlainMatrixArray(sym=sigma)
    print(tiny2zero(mat.mhalf))
    mih = mat.minvhalf
    print(tiny2zero(mih))  # for nicer printing

    mat2 = PlainMatrixArray(data=x)
    print(maxabs(mat2.yt_minv_y(np.dot(x.T, x)) - mat2.m))
    print(tiny2zero(mat2.minv_y(mat2.m)))

    mat3 = SvdArray(data=x)
    print(mat3.meigh[0])
    print(mat2.meigh[0])

    testcompare(mat2, mat3)
Code example #32
 def time_toeplitz(self, size):
     sl.toeplitz(self.x)
Code example #33
File: rec.py Project: ufda/btgym
    def update(self, x):
        """
        Updates statistics estimates.

        Args:
            x: np.array, partial trajectory of shape [dim, num_updating_points]

        Returns:
            current dimension-wise mean and variance estimates of sizes [dim, 1], [dim, 1]
        """
        assert len(x.shape) == 2 and x.shape[0] == self.dim

        # Update length:
        k = x.shape[-1]

        self.num_obs += k
        if not self.is_decayed:
            self.alpha = 1 / (self.num_obs - 1)

        # Mean estimation:

        # Broadcast input to [dim, update_len, update_len]:
        xx = np.tile(x[:, None, :], [1, k, 1])

        gamma = 1 - self.alpha

        # Exp. decays as powers of (1-alpha):
        g = np.cumprod(np.repeat(gamma, k))

        # Diag. matrix of decayed coeff:
        tp = toeplitz(g / gamma, r=np.zeros(k))[::-1, ::1]

        # Backward-ordered mean updates as sums of decayed inputs:
        k_step_mean_update = np.sum(xx * tp[None, ...], axis=2)  # tp expanded for sure broadcast

        # Broadcast stored value of mean to [dim, 1] and apply decay:
        k_decayed_old_mean = (np.tile(self.mean[..., None], [1, k]) * g)

        # Get backward-recursive array of mean values from (num_obs - update_len) to (num_obs):
        means = k_decayed_old_mean + self.alpha * k_step_mean_update[:, ::-1]

        # Variance estimation:

        # Get deviations of update:
        dx = x - np.concatenate([self.mean[..., None], means[:, :-1]], axis=1)

        # Get new variance value at (num_obs) point:
        k_decayed_old_var = gamma ** k * self.variance
        k_step_var_update = np.sum(g[::-1] * dx ** 2, axis=1)

        variance = k_decayed_old_var + self.alpha * k_step_var_update

        # Update current values:
        self.mean = means[:, -1]
        self.variance = variance

        # Keep g and dx:
        self.g = g
        self.dx = dx

        return self.mean, self.variance
Code example #34
File: eligibility_traces.py Project: khivi/MDP-DP-RL
def get_decay_toeplitz_matrix(size: int, decay_param: float) -> np.ndarray:
    return toeplitz(np.power(decay_param, np.arange(size)),
                    np.insert(np.zeros(size - 1), 0, 1.))
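To see what this returns, a quick sketch with arbitrary values size = 4 and decay_param = 0.5: a lower-triangular Toeplitz matrix whose sub-diagonals hold increasing powers of the decay parameter.

import numpy as np
from scipy.linalg import toeplitz

T = toeplitz(np.power(0.5, np.arange(4)),
             np.insert(np.zeros(3), 0, 1.))
print(T)
# [[1.    0.    0.    0.   ]
#  [0.5   1.    0.    0.   ]
#  [0.25  0.5   1.    0.   ]
#  [0.125 0.25  0.5   1.   ]]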
Code example #35
def Tmtx(data, K):
    '''
    Construct convolution matrix for a filter specified by 'data'

    '''
    return splin.toeplitz(data[K::], data[K::-1])
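A standalone check of this construction (assuming a filter of length K + 1; the filter values here are hypothetical): each row of the returned matrix is one 'valid'-mode convolution window, so T @ h matches np.convolve(data, h, mode='valid').

import numpy as np
from scipy import linalg as splin

data = np.arange(5.)
K = 2
T = splin.toeplitz(data[K:], data[K::-1])
h = np.array([1., -2., 1.])     # example length-(K+1) filter
assert np.allclose(T @ h, np.convolve(data, h, mode='valid'))
print(T @ h)                    # second difference of a ramp: all zeros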
Code example #36
 def test_scalar_04(self):
     r = array([10, 2, 3])
     t = toeplitz(1, r)
     assert_array_equal(t, [[1, 2, 3]])
Code example #37
u_11 = np.zeros(n+1)
u_11[x > 0.3] = 1
u_12 = np.zeros(n+1)
u_12[x > 0.7] = 1
u_1 = u_11 - u_12

u_2 = x * (1 - x)

# Display response functions
plt.plot(x, u_1)
plt.plot(x, u_2)

# define forward operator
a = 1
c = np.exp(-a * x_matrix ** 2) / ((n - 1) * np.sqrt(np.pi/a))
K = la.toeplitz(c)
K_cond_nr = np.linalg.cond(K)
print('Condition number... ', K_cond_nr)

# Okay, we are off to a good start...
# Now let us vary delta and recover u,
# but also see the impact of a.
result_dict = {}
for i, u_sel in enumerate([u_1, u_2]):
    u_key = f'u_{i+1}'
    result_dict.setdefault(u_key, {})

    for delta in [0.001, 0.01, 0.1]:
        temp_key = f"delta:{delta}"
        u_svd, s_svd, vh_svd = np.linalg.svd(K, full_matrices=True)
        K_svd_inv = vh_svd.conj().T @ np.diag(1 / s_svd) @ u_svd.conj().T
Code example #38
 def test_scalar_00(self):
     """Scalar arguments still produce a 2D array."""
     t = toeplitz(10)
     assert_array_equal(t, [[10]])
     t = toeplitz(10, 20)
     assert_array_equal(t, [[10]])
Code example #39
 def test_basic(self):
     y = toeplitz([1, 2, 3])
     assert_array_equal(y, [[1, 2, 3], [2, 1, 2], [3, 2, 1]])
     y = toeplitz([1, 2, 3], [1, 4, 5])
     assert_array_equal(y, [[1, 4, 5], [2, 1, 4], [3, 2, 1]])
Code example #40
def cohens_kappa(table, weights=None, return_results=True, wt=None):
    '''Compute Cohen's kappa with variance and equal-zero test

    Parameters
    ----------
    table : array_like, 2-Dim
        square array with results of two raters, one rater in rows, second
        rater in columns
    weights : array_like
        The interpretation of weights depends on the wt argument.
        If both are None, then the simple kappa is computed.
        see wt for the case when wt is not None
        If weights is two dimensional, then it is directly used as a weight
        matrix. For computing the variance of kappa, the maximum of the
        weights is assumed to be smaller or equal to one.
        TODO: fix conflicting definitions in the 2-Dim case for
    wt : None or string
        If wt and weights are None, then the simple kappa is computed.
        If wt is given, but weights is None, then the weights are set to
        be [0, 1, 2, ..., k].
        If weights is a one-dimensional array, then it is used to construct
        the weight matrix given the following options.

        wt in ['linear', 'ca' or None] : use linear weights, Cicchetti-Allison
            actual weights are linear in the score "weights" difference
        wt in ['quadratic', 'fc'] : use quadratic weights, Fleiss-Cohen
            actual weights are squared in the score "weights" difference
        wt = 'toeplitz' : weight matrix is constructed as a toeplitz matrix
            from the one dimensional weights.

    return_results : bool
        If True (default), then an instance of KappaResults is returned.
        If False, then only kappa is computed and returned.

    Returns
    -------
    results or kappa
        If return_results is True (default), then a results instance with all
        statistics is returned
        If return_results is False, then only kappa is calculated and returned.

    Notes
    -----
    There are two conflicting definitions of the weight matrix, Wikipedia
    versus SAS manual. However, the computation are invariant to rescaling
    of the weights matrix, so there is no difference in the results.

    Weights for 'linear' and 'quadratic' are interpreted as scores for the
    categories, the weights in the computation are based on the pairwise
    difference between the scores.
    Weights for 'toeplitz' are interpreted as a weighted distance. The distance
    only depends on how many levels apart two entries in the table are but
    not on the levels themselves.

    example:

    weights = '0, 1, 2, 3' and wt is either linear or toeplitz means that the
    weighting only depends on the simple distance of levels.

    weights = '0, 0, 1, 1' and wt = 'linear' means that the first two levels
    are zero distance apart and the same for the last two levels. This is
    the same as forming two aggregated levels by merging the first two and
    the last two levels, respectively.

    weights = [0, 1, 2, 3] and wt = 'quadratic' is the same as squaring these
    weights and using wt = 'toeplitz'.

    References
    ----------
    Wikipedia
    SAS Manual

    '''
    table = np.asarray(table, float)  #avoid integer division
    agree = np.diag(table).sum()
    nobs = table.sum()
    probs = table / nobs
    freqs = probs  #TODO: rename to use freqs instead of probs for observed
    probs_diag = np.diag(probs)
    freq_row = table.sum(1) / nobs
    freq_col = table.sum(0) / nobs
    prob_exp = freq_col * freq_row[:, None]
    assert np.allclose(prob_exp.sum(), 1)
    #print prob_exp.sum()
    agree_exp = np.diag(prob_exp).sum()  #need for kappa_max
    if weights is None and wt is None:
        kind = 'Simple'
        kappa = (agree / nobs - agree_exp) / (1 - agree_exp)

        if return_results:
            #variance
            term_a = probs_diag * (1 - (freq_row + freq_col) * (1 - kappa))**2
            term_a = term_a.sum()
            term_b = probs * (freq_col[:, None] + freq_row)**2
            d_idx = np.arange(table.shape[0])
            term_b[d_idx, d_idx] = 0  #set diagonal to zero
            term_b = (1 - kappa)**2 * term_b.sum()
            term_c = (kappa - agree_exp * (1 - kappa))**2
            var_kappa = (term_a + term_b - term_c) / (1 - agree_exp)**2 / nobs
            #term_c = freq_col * freq_row[:, None] * (freq_col + freq_row[:,None])
            term_c = freq_col * freq_row * (freq_col + freq_row)
            var_kappa0 = (agree_exp + agree_exp**2 - term_c.sum())
            var_kappa0 /= (1 - agree_exp)**2 * nobs

    else:
        if weights is None:
            weights = np.arange(table.shape[0])
        #weights follows the Wikipedia definition, not the SAS, which is 1 -
        kind = 'Weighted'
        weights = np.asarray(weights, float)
        if weights.ndim == 1:
            if wt in ['ca', 'linear', None]:
                weights = np.abs(weights[:, None] - weights) /  \
                           (weights[-1] - weights[0])
            elif wt in ['fc', 'quadratic']:
                weights = (weights[:, None] - weights)**2 /  \
                           (weights[-1] - weights[0])**2
            elif wt == 'toeplitz':
                #assume toeplitz structure
                from scipy.linalg import toeplitz
                #weights = toeplitz(np.arange(table.shape[0]))
                weights = toeplitz(weights)
            else:
                raise ValueError('wt option is not known')
        else:
            rows, cols = table.shape
            if (table.shape != weights.shape):
                raise ValueError('weights are not square')
        #this is formula from Wikipedia
        kappa = 1 - (weights * table).sum() / nobs / (weights * prob_exp).sum()
        #TODO: add var_kappa for weighted version
        if return_results:
            var_kappa = np.nan
            var_kappa0 = np.nan
            #switch to SAS manual weights, problem if user specifies weights
            #w is negative in some examples,
            #but weights is scale invariant in examples and rough check of source
            w = 1. - weights
            w_row = (freq_col * w).sum(1)
            w_col = (freq_row[:, None] * w).sum(0)
            agree_wexp = (w * freq_col * freq_row[:, None]).sum()
            term_a = freqs * (w - (w_col + w_row[:, None]) * (1 - kappa))**2
            fac = 1. / ((1 - agree_wexp)**2 * nobs)
            var_kappa = term_a.sum() - (kappa - agree_wexp * (1 - kappa))**2
            var_kappa *= fac

            freqse = freq_col * freq_row[:, None]
            var_kappa0 = (freqse * (w - (w_col + w_row[:, None]))**2).sum()
            var_kappa0 -= agree_wexp**2
            var_kappa0 *= fac

    kappa_max = (np.minimum(freq_row, freq_col).sum() - agree_exp) / \
                (1 - agree_exp)

    if return_results:
        res = KappaResults(kind=kind,
                           kappa=kappa,
                           kappa_max=kappa_max,
                           weights=weights,
                           var_kappa=var_kappa,
                           var_kappa0=var_kappa0)
        return res
    else:
        return kappa
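To illustrate the wt = 'toeplitz' branch above with a toy vector: a 1-D weight vector is expanded into a symmetric matrix in which the weight depends only on the distance between levels.

import numpy as np
from scipy.linalg import toeplitz

print(toeplitz([0, 1, 2, 3]))
# [[0 1 2 3]
#  [1 0 1 2]
#  [2 1 0 1]
#  [3 2 1 0]]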
Code example #41
 def _fetch_cis_oe(reg1, reg2):
     reg1_coords = tuple(regions.loc[reg1])
     reg2_coords = tuple(regions.loc[reg2])
     obs_mat = clr.matrix(balance=weight_name).fetch(reg1_coords)
     exp_mat = toeplitz(expected[reg1][:obs_mat.shape[0]])
     return obs_mat / exp_mat
Code example #42
import matplotlib.pyplot as plt

n = 10**4
sim = 300
c = 0.1 * n

##
#simulating data
##
npr.seed(123456)
nClass = 3
dim = 3
P = [0.4, 0.3, 0.3]
Thetas = [np.array([0., 0, 0]), np.array([0., -2, 1]), np.array([1., 2, 0])]
Sigmas = [
    0.1 * spl.toeplitz([2., 0.5, 0]), 0.1 * spl.toeplitz([2., -0.5, 1]),
    0.1 * spl.toeplitz([1., .3, .3])
]

mix_obj = mixP(K=nClass)
mix_obj.mu = cp.deepcopy(Thetas)
mix_obj.sigma = cp.deepcopy(Sigmas)
mix_obj.p = cp.deepcopy(P)
mix_obj.d = dim
Y = mix_obj.simulate_data(n)

mix = mixP(K=nClass, high_memory=False)
mix.set_data(Y)
for i in range(10):  #np.int(np.ceil(0.1*self.sim))):  # @UnusedVariable
    mix.sample()
Code example #43
A = domega * np.sum(states[0, :])
print('Initial area: ', A)
states[0, :] = states[0, :] / A  # Normalise distribution

# Time simulation
if 1:
    for t in range(t_steps - 1):
        if t % 100 == 0:
            print('Area at t =', t, ' equals ', np.sum(states[t, :]) * domega)
            print("calculating for t =", t)

        # Explanation of the discrete averaging integral underneath.

        H2 = np.append(0, states[t, :round(3 * x_steps / (4)) - 1:-1])
        H = toeplitz(states[t, :], H2)
        H = H[:, 1:]

        G2 = np.append(states[t, -1], states[t, 0:round(x_steps / (4))])
        G = hankel(states[t, :], G2)
        G = G[:, 1:]

        averagingintegral = np.sum(H * G, axis=1)
        # averagingintegral = averagingintegralfunc(states[t, :], x_steps)
        # if (abs(averagingintegral-averagingintegral_check) > eps).any():
        #     print('ERROR: averaging integral')
        #     print(averagingintegral)
        #     print(averagingintegral_check)
        #     print(averagingintegral.shape)
        #     print(averagingintegral_check.shape)
        #     break
Code example #44
def questao_25():
    # -----------------------------------------------------------
    # Exercise 25
    # -----------------------------------------------------------
    NFFT = 1024
    N = 19
    SNR = 20
    h = [.34 - .21 * 1j, .87 + .43 * 1j, .34 - .27 * 1j]
    qam = QAM()
    channel = Filter(h, [1.])
    data = ChannelEqualization(qam,
                               channel,
                               N=N,
                               input_delay=int((N + len(h)) / 2),
                               noise=GaussianNoise,
                               SNR=SNR)
    a = qam(100000, )
    A = toeplitz(np.hstack([a[0], np.zeros(N)]), a)
    R = A.dot(A.T.conj()) / 100000
    trR = R.trace()
    mu_max = 1 / trR
    MSE, W = [], []
    for mu in (mu_max / 2, mu_max / 10, mu_max / 50):
        E_hist, W_hist = lms(data,
                             mu,
                             N + 1,
                             max_runs=50,
                             max_iter=5000,
                             dtype='complex128',
                             print_every=-1)

        MSE.append(np.mean(np.abs(E_hist)**2, axis=0))
        W.append(np.mean(W_hist, axis=0))
    plt.figure()
    for mse, name in zip(
            MSE,
        ['$\\mu_{\\max}/2$', '$\\mu_{\\max}/10$', '$\\mu_{\\max}/50$']):
        plt.plot(10 * np.log10(mse), label=name)
    plt.legend()
    plt.xlabel('Iteração')
    plt.ylabel('MSE (em dB)')
    # plt.savefig('ex25-mse-{}.pdf'.format(N+1), dpi=300, bbox_inches='tight')
    plt.show()

    b_adap = np.mean(W_hist, axis=0)[-1, :].conj()
    mse = np.mean(np.abs(E_hist)**2, axis=0)

    plt.figure()
    freqs = np.linspace(-1, 1, NFFT)
    plt.plot(freqs, 20 * np.log10(fft(h, n=NFFT)), label='Canal')
    plt.plot(freqs,
             20 * np.log10(fft(b_adap, n=NFFT)),
             'r--',
             label='Equalizador')
    plt.plot(freqs,
             20 * np.log10(np.abs(fft(np.convolve(b_adap, h), n=NFFT))),
             'y--',
             label='Canal equalizado')
    plt.xlim([-1, 1])
    plt.legend()
    plt.xlabel('Frequência normalizada')
    plt.ylabel('Magnitude (em dB)')
    plt.show()
    # plt.savefig('figs/ex25-freq-{}.pdf'.format(N+1), dpi=300, bbox_inches='tight')

    plt.figure()
    plt.plot(signal.convolve(b_adap, h))
    plt.xlabel('Amostra')
    plt.ylabel('Amplitude')
    plt.show()
    # plt.savefig('ex25-tempo-{}.pdf'.format(N+1), dpi=300, bbox_inches='tight')

    tx = qam(100)
    rx = signal.lfilter(h, [1.], tx)
    noise = GaussianNoise(std=np.sqrt(rx.var() / (2 * SNR)), complex=True)(100)
    rx += noise
    rx_eq = np.convolve(rx, b_adap)

    plt.figure()
    plt.plot(np.real(rx), np.imag(rx), 'o', label='Recebido')
    plt.plot(np.real(rx_eq), np.imag(rx_eq), 'o', label='Equalizado')
    plt.plot(1, 1, 'ro', label='Alvo')
    plt.plot(1, -1, 'ro')
    plt.plot(-1, 1, 'ro')
    plt.plot(-1, -1, 'ro')
    plt.legend()
    plt.xlabel('Real')
    plt.ylabel('Imaginário')
    plt.show()
Code example #45
def worker_train_VBN(input_worker_VBN):
    """Run one worker episode and accumulate activation statistics for VBN."""
    #Global variables:
    global numInput,numOutput,numHidden1,numHidden2
    global dim_hidden2_output, dim_input_hidden1, dim_hidden1_hidden2
    global env
    
    #Local:

    seed=int(input_worker_VBN[0])
    p = input_worker_VBN[1]
    
    env.seed(seed) 
    #np.random.seed(seed)    
    VBN_dict = {}
    #VBN_dict['mu_i']=np.zeros((numInput,1))
    #VBN_dict['var_i']=np.ones((numInput,1))
    VBN_dict['mu_h1']=np.zeros((numHidden1,1))
    VBN_dict['var_h1']=np.ones((numHidden1,1))
    VBN_dict['mu_h2']=np.zeros((numHidden2,1))
    VBN_dict['var_h2']=np.zeros((numHidden2,1))   
    
    
    #Neural Networks:
    NN = NeuralNetwork(numInput,numHidden1,numHidden2,numOutput, VBN_dict)
    

    NN.W1=toeplitz(p[0][numInput:],p[0][:numInput])
    NN.W2=toeplitz(p[1][numHidden1:],p[1][:numHidden1])
    NN.W3=toeplitz(p[2][numHidden2:],p[2][:numHidden2])

    
    #SHOULD IT BE PLACED IN THE LOOP ? CANT THINK RIGHT NOW
    sum_zh1=[0.] * numHidden1  
    sum_zh2=[0.] *numHidden2
    #sum_zi=[0.] * numInput 
    
    sum_zh1_sq=[0.] * numHidden1  
    sum_zh2_sq=[0.] *numHidden2
    #sum_zi_sq=[0.] * numInput  
    
    steps=1000
    Ai = env.reset()  
    num_step=steps
    NN.use_VBN=False #we don't want to use feedforward options with VBN to compute the statistics
    
    for j in range(steps):
        
        Ao = NN.feedForward(Ai)  
                      
        sum_zh1=[sum(x) for x in zip(sum_zh1, NN.Z1)]
        sum_zh2=[sum(x) for x in zip(sum_zh2, NN.Z2)]
        #sum_zi=[sum(x) for x in zip(sum_zi, NN.Ai)]

        sum_zh1_sq=[sum(x) for x in zip(sum_zh1_sq, square(NN.Z1))]
        sum_zh2_sq=[sum(x) for x in zip(sum_zh2_sq, square(NN.Z2))]
        #sum_zi_sq=[sum(x) for x in zip(sum_zi_sq, square(NN.Ai))]
                
        action=np.argmax(Ao)      
        Ai, reward, done, info = env.step(action)
        
        if done:
            num_step = j
            break
        
    #return(sum_zi,sum_zh1,sum_zh2,sum_zi_sq,sum_zh1_sq,sum_zh2_sq,num_step)
    return(sum_zh1,sum_zh2,sum_zh1_sq,sum_zh2_sq,num_step)
Code example #46
            #if the user wants skew-symmetric matrix
            elif matrix_structure == 5:
                final = arr - arr.T

                print(final)

            #if the user wants Toeplitz matrix
            elif matrix_structure == 6:

                #get the first column
                col = arr[:, 0]

                #get the first row
                row = arr[0]

                final = linalg.toeplitz(col, row)

                print(final)

            #if the user wants Circulant matrix
            elif matrix_structure == 7:

                #get the first row in the matrix
                row = arr[0]

                final = linalg.circulant(row)
                print(final)

            #if the user wants stochastic matrix
            elif matrix_structure == 9:
Code example #47
def pbdesign(n):
    """
    Generate a Plackett-Burman design

    Parameters
    ----------
    n : int
        The number of factors to create a matrix for.

    Returns
    -------
    H : 2d-array
        An orthogonal design matrix with n columns, one for each factor, and
        the number of rows being the next multiple of 4 higher than n (e.g.,
        for 1-3 factors there are 4 rows, for 4-7 factors there are 8 rows,
        etc.)

    Example
    -------

    A 3-factor design::

        >>> pbdesign(3)
        array([[-1., -1.,  1.],
               [ 1., -1., -1.],
               [-1.,  1., -1.],
               [ 1.,  1.,  1.]])

    A 5-factor design::

        >>> pbdesign(5)
        array([[-1., -1.,  1., -1.,  1.],
               [ 1., -1., -1., -1., -1.],
               [-1.,  1., -1., -1.,  1.],
               [ 1.,  1.,  1., -1., -1.],
               [-1., -1.,  1.,  1., -1.],
               [ 1., -1., -1.,  1.,  1.],
               [-1.,  1., -1.,  1., -1.],
               [ 1.,  1.,  1.,  1.,  1.]])

    """
    if not n > 0:
        raise Exception('Number of factors must be a positive integer')
    keep = int(n)
    n = 4 * (int(n / 4) + 1)  # calculate the correct number of rows (multiple of 4)
    f, e = np.frexp([n, n / 12., n / 20.])
    k = [idx for idx, val in enumerate(np.logical_and(f == 0.5, e > 0)) if val]

    if not (isinstance(n, int) and k != []):
        raise Exception('Invalid inputs. n must be a multiple of 4.')

    k = k[0]
    e = e[k] - 1

    if k == 0:  # N = 1*2**e
        H = np.ones((1, 1))
    elif k == 1:  # N = 12*2**e
        H = np.vstack(
            (np.ones((1, 12)),
             np.hstack((np.ones((11, 1)),
                        toeplitz([-1, -1, 1, -1, -1, -1, 1, 1, 1, -1, 1],
                                 [-1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1])))))
    elif k == 2:  # N = 20*2**e
        H = np.vstack((np.ones((1, 20)),
                       np.hstack((np.ones((19, 1)),
                                  hankel([
                                      -1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1,
                                      -1, 1, 1, 1, 1, -1, -1, 1
                                  ], [
                                      1, -1, -1, 1, 1, -1, -1, -1, -1, 1, -1,
                                      1, -1, 1, 1, 1, 1, -1, -1
                                  ])))))

    # Kronecker product construction
    for _ in range(e):
        H = np.vstack((np.hstack((H, H)), np.hstack((H, -H))))

    # Reduce the size of the matrix as needed
    H = H[:, 1:(keep + 1)]

    return np.flipud(H)
Code example #48
    def estimate_parameters_diracs_sectoral(self, spectrum):

        L = spectrum.L
        l = np.arange(L)

        #Get sectoral SH coefficients
        f_ll = spectrum[l, l]
        N_ll = get_N_l_m(l, l)

        N_l = np.zeros(N_ll.shape, dtype=complex)
        for ll in l:
            N_l[ll] = polypart_coeffs(ll, ll, L)[-1]

        z_l = f_ll / N_ll / N_l

        #Cadzow denoising
        z_l = self.cadzow(z_l)

        #Compute annihilating filter coefficients
        Z = linalg.toeplitz(z_l[self.K:], np.flipud(z_l[:self.K + 1]))
        U, s, V = np.linalg.svd(Z)
        V = V.conj().T

        u_k = np.roots(V[:, -1])

        #Find the phi_k angles
        phi_k = np.mod(-np.angle(u_k), 2 * np.pi)

        #Find the cks
        van = np.vander(u_k, len(z_l), increasing=True).T
        c_k = np.linalg.lstsq(van, z_l)[0]

        f_lm1l = spectrum[l[1:], l[:-1]]
        N_lm1l = get_N_l_m(l[1:], l[:-1])

        N_lm1 = np.zeros(N_lm1l.shape, dtype=complex)
        for ll in l[1:]:
            N_lm1[ll - 1] = polypart_coeffs(ll, ll - 1, L)[-2]

        w_l = f_lm1l / N_lm1l / N_lm1
        van2 = np.vander(u_k, len(w_l), increasing=True).T

        cos_k_exp_rk = np.real(np.linalg.lstsq(van2, w_l)[0] / c_k)

        #Estimate the width
        a = u_k / np.exp(-1j * phi_k)
        b = cos_k_exp_rk
        r_k = np.real(0.5 * np.log(
            (-b**2 + np.sqrt(4 * a**2 + b**4)) / (2 * a**2)))

        cos_k = cos_k_exp_rk / np.exp(-r_k)
        theta_k = np.arccos(cos_k)

        #correct the rks (if the roots are outside the unit circle)
        r_k[np.where(r_k < 0)] = 1 / 200.0

        #update the cks using the entire spectrum
        spectrum_1D = spectrum.to_1D()
        mat = np.zeros([spectrum_1D.shape[0], self.K], dtype=complex)
        sph_vpw = Spherical_FRI(theta_k, phi_k, np.ones(theta_k.shape), r_k)

        for k in np.arange(self.K):
            mat[:, k] = Spherical_Function_Spectrum(
                sph_vpw.vpw_pulse_sh(k, spectrum.L)).to_1D()

        c_k = np.real(np.linalg.lstsq(mat, spectrum_1D)[0])

        #Call nonlinear least squares method to refine the estimation
        vpw_fct = lambda x: Spherical_FRI(
            x[:self.K], x[self.K:2 * self.K],
            x[2 * self.K:3 * self.K], x[3 * self.K:]).vpw_sh(L=spectrum.L).to_1D()
        vpw_diff = lambda x: np.abs(spectrum_1D - vpw_fct(x))

        x0 = np.hstack((theta_k, phi_k, c_k, r_k))

        x_new = optimize.leastsq(vpw_diff, x0)[0]

        #print 'Squared error before nonlinear optimization:'
        #print np.sum(vpw_diff(x0)**2)

        #print 'Squared error after nonlinear optimization:'
        #print np.sum(vpw_diff(x_new)**2)

        theta_k = x_new[:self.K]
        phi_k = x_new[self.K:2 * self.K]
        c_k = x_new[2 * self.K:3 * self.K]
        r_k = x_new[3 * self.K:]

        return Spherical_FRI(theta_k, phi_k, c_k, r_k)
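The Toeplitz/SVD/roots sequence above is the standard annihilating-filter step from finite-rate-of-innovation recovery. A self-contained sketch of just that step on a synthetic sequence z_l = sum_k c_k * u_k**l (all names and values here are illustrative):

import numpy as np
from scipy import linalg

K = 2                                            # number of exponentials
u_true = np.exp(1j * np.array([0.4, 1.7]))       # true poles on the unit circle
c_true = np.array([1.0, 0.5])
L = 10
z = np.vander(u_true, L, increasing=True).T @ c_true   # z_l = sum_k c_k u_k**l

# The annihilating filter h (length K+1) satisfies Z h = 0; it is the right
# singular vector of the Toeplitz matrix Z with the smallest singular value.
Z = linalg.toeplitz(z[K:], np.flipud(z[:K + 1]))
_, _, Vh = np.linalg.svd(Z)
h = Vh.conj().T[:, -1]

u_est = np.roots(h)                              # poles = roots of the filter
print(np.sort(np.angle(u_est)), np.sort(np.angle(u_true)))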
Code Example #49
File: simulatedtest.py  Project: niliafsari/Toeplitz
#!/usr/bin/env python

import sys
import numpy as np
from scipy.linalg import inv, toeplitz
from toeplitz_decomp import *

# 1-d case
a = np.load('simdata.npy')
pad = 1
b = 1
n = a.shape[0]
print(n)
A = toeplitz(a[:, 0])
z = np.zeros(shape=((n + pad * n) * b, (n + pad * n) * b), dtype=complex)
z[0:n * b, 0:n * b] = toeplitz(a[:, 0])
#A=toeplitz(z[:,0])
#in0=np.copy(a[0:512,0:1])
#toeplitz_decomp
#l=toeplitz_blockschur(np.conj(a[:,0:1].T),1)
l = toep_zpad(np.array(z[:, 0:1]).reshape(-1, ).tolist(), 0)
k = np.copy(l[n + n * pad - 1, n * pad:n + n * pad])
u = np.copy(l[n + n * pad - 1, 0:n + n * pad])
ll = np.correlate(u, k)[1:n + 1]
ll = ll[::-1]
print(np.sum(ll - a[:, 0]))
#result = np.dot(l,np.conj(l).T)
AA = toeplitz(np.conj(ll[0:n]))
print("Consistency check, these numbers should be small:", np.sum(AA - A))

Code Example #50
	plt.xlabel('Time (s)')
	
	plt.subplot(2,2,4)
	plt.plot(n[0:N-3], dot(D_jerk, position_profile))
	plt.xlabel('Time (s)')
	plt.title('Jerk')
		

'''
Solve the minimum effort control problem for a particle at an initial position of 2.0
and a final position of 5.0. This is the "straight path" trajectory.
'''
# set up jerk matrix D_{jerk}:
row_jerk = hstack((array([[-1, 3, -3, 1]]), zeros((1,N-4))))
col_jerk = vstack((array([[-1]]), zeros((N-4,1))))
D_jerk = power(N, 3)*toeplitz(col_jerk, row_jerk)

# set up constraint matrices:Aeq*x = beq
initial_position = hstack((array([[1]]), zeros((1,N-1))))
final_position = hstack((zeros((1,N-1)), array([[1]])))

initial_velocity = hstack((array([[-1, 1]]), zeros((1,N-2))))
final_velocity = hstack((zeros((1,N-2)), array([[-1, 1]])))

initial_acceleration = hstack((array([[1,-2,1]]), zeros((1,N-3))))
final_acceleration = hstack((zeros((1,N-3)), array([[1,-2,1]])))

Aeq = vstack((initial_position, final_position, initial_velocity, \
	final_velocity, initial_acceleration, final_acceleration))
beq = zeros((6,1))
beq[0] = 2 #initial position
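The D_jerk construction above is a banded third-difference operator (the power(N, 3) factor is the 1/dt**3 time scaling). A quick check (illustrative, scaling omitted) that the toeplitz(col, row) stencil reproduces numpy's third-order forward difference:

import numpy as np
from numpy import array, hstack, vstack, zeros
from scipy.linalg import toeplitz

N = 8
row_jerk = hstack((array([[-1, 3, -3, 1]]), zeros((1, N - 4))))
col_jerk = vstack((array([[-1]]), zeros((N - 4, 1))))
D = toeplitz(col_jerk, row_jerk)                 # shape (N-3, N)

x = np.random.randn(N)
print(np.allclose(D @ x, np.diff(x, n=3)))       # True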
Code Example #51
def firls(numtaps, bands, desired, weight=None, nyq=None, fs=None):
    """
    FIR filter design using least-squares error minimization.

    Calculate the filter coefficients for the linear-phase finite
    impulse response (FIR) filter which has the best approximation
    to the desired frequency response described by `bands` and
    `desired` in the least squares sense (i.e., the integral of the
    weighted mean-squared error within the specified bands is
    minimized).

    Parameters
    ----------
    numtaps : int
        The number of taps in the FIR filter. `numtaps` must be odd.
    bands : array_like
        A monotonic nondecreasing sequence containing the band edges in
        Hz. All elements must be non-negative and less than or equal to
        the Nyquist frequency given by `nyq`.
    desired : array_like
        A sequence the same size as `bands` containing the desired gain
        at the start and end point of each band.
    weight : array_like, optional
        A relative weighting to give to each band region when solving
        the least squares problem. `weight` has to be half the size of
        `bands`.
    nyq : float, optional, deprecated
        This is the Nyquist frequency. Each frequency in `bands` must be
        between 0 and `nyq` (inclusive). Default is 1.

        .. deprecated:: 1.0.0
           `firls` keyword argument `nyq` is deprecated in favour of `fs` and
           will be removed in SciPy 1.12.0.
    fs : float, optional
        The sampling frequency of the signal. Each frequency in `bands`
        must be between 0 and ``fs/2`` (inclusive). Default is 2.

    Returns
    -------
    coeffs : ndarray
        Coefficients of the optimal (in a least squares sense) FIR filter.

    See also
    --------
    firwin
    firwin2
    minimum_phase
    remez

    Notes
    -----
    This implementation follows the algorithm given in [1]_.
    As noted there, least squares design has multiple advantages:

        1. Optimal in a least-squares sense.
        2. Simple, non-iterative method.
        3. The general solution can be obtained by solving a linear
           system of equations.
        4. Allows the use of a frequency dependent weighting function.

    This function constructs a Type I linear phase FIR filter, which
    contains an odd number of `coeffs` satisfying for :math:`n < numtaps`:

    .. math:: coeffs(n) = coeffs(numtaps - 1 - n)

    The odd number of coefficients and filter symmetry avoid boundary
    conditions that could otherwise occur at the Nyquist and 0 frequencies
    (e.g., for Type II, III, or IV variants).

    .. versionadded:: 0.18

    References
    ----------
    .. [1] Ivan Selesnick, Linear-Phase Fir Filter Design By Least Squares.
           OpenStax CNX. Aug 9, 2005.
           http://cnx.org/contents/eb1ecb35-03a9-4610-ba87-41cd771c95f2@7

    Examples
    --------
    We want to construct a band-pass filter. Note that the behavior in the
    frequency ranges between our stop bands and pass bands is unspecified,
    and thus may overshoot depending on the parameters of our filter:

    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> fig, axs = plt.subplots(2)
    >>> fs = 10.0  # Hz
    >>> desired = (0, 0, 1, 1, 0, 0)
    >>> for bi, bands in enumerate(((0, 1, 2, 3, 4, 5), (0, 1, 2, 4, 4.5, 5))):
    ...     fir_firls = signal.firls(73, bands, desired, fs=fs)
    ...     fir_remez = signal.remez(73, bands, desired[::2], fs=fs)
    ...     fir_firwin2 = signal.firwin2(73, bands, desired, fs=fs)
    ...     hs = list()
    ...     ax = axs[bi]
    ...     for fir in (fir_firls, fir_remez, fir_firwin2):
    ...         freq, response = signal.freqz(fir)
    ...         hs.append(ax.semilogy(0.5*fs*freq/np.pi, np.abs(response))[0])
    ...     for band, gains in zip(zip(bands[::2], bands[1::2]),
    ...                            zip(desired[::2], desired[1::2])):
    ...         ax.semilogy(band, np.maximum(gains, 1e-7), 'k--', linewidth=2)
    ...     if bi == 0:
    ...         ax.legend(hs, ('firls', 'remez', 'firwin2'),
    ...                   loc='lower center', frameon=False)
    ...     else:
    ...         ax.set_xlabel('Frequency (Hz)')
    ...     ax.grid(True)
    ...     ax.set(title='Band-pass %d-%d Hz' % bands[2:4], ylabel='Magnitude')
    ...
    >>> fig.tight_layout()
    >>> plt.show()

    """  # noqa
    nyq = 0.5 * _get_fs(fs, nyq)

    numtaps = int(numtaps)
    if numtaps % 2 == 0 or numtaps < 1:
        raise ValueError("numtaps must be odd and >= 1")
    M = (numtaps - 1) // 2

    # normalize bands 0->1 and make it 2 columns
    nyq = float(nyq)
    if nyq <= 0:
        raise ValueError('nyq must be positive, got %s <= 0.' % nyq)
    bands = np.asarray(bands).flatten() / nyq
    if len(bands) % 2 != 0:
        raise ValueError("bands must contain frequency pairs.")
    if (bands < 0).any() or (bands > 1).any():
        raise ValueError("bands must be between 0 and 1 relative to Nyquist")
    bands.shape = (-1, 2)

    # check remaining params
    desired = np.asarray(desired).flatten()
    if bands.size != desired.size:
        raise ValueError("desired must have one entry per frequency, got %s "
                         "gains for %s frequencies." %
                         (desired.size, bands.size))
    desired.shape = (-1, 2)
    if (np.diff(bands) <= 0).any() or (np.diff(bands[:, 0]) < 0).any():
        raise ValueError("bands must be monotonically nondecreasing and have "
                         "width > 0.")
    if (bands[:-1, 1] > bands[1:, 0]).any():
        raise ValueError("bands must not overlap.")
    if (desired < 0).any():
        raise ValueError("desired must be non-negative.")
    if weight is None:
        weight = np.ones(len(desired))
    weight = np.asarray(weight).flatten()
    if len(weight) != len(desired):
        raise ValueError("weight must be the same size as the number of "
                         "band pairs (%s)." % (len(bands), ))
    if (weight < 0).any():
        raise ValueError("weight must be non-negative.")

    # Set up the linear matrix equation to be solved, Qa = b

    # We can express Q(k,n) = 0.5 Q1(k,n) + 0.5 Q2(k,n)
    # where Q1(k,n)=q(k-n) and Q2(k,n)=q(k+n), i.e. a Toeplitz plus Hankel.

    # We omit the factor of 0.5 above, instead adding it during coefficient
    # calculation.

    # We also omit the 1/π from both Q and b equations, as they cancel
    # during solving.

    # We have that:
    #     q(n) = 1/π ∫W(ω)cos(nω)dω (over 0->π)
    # Using our normalization ω=πf and with a constant weight W over each
    # interval f1->f2 we get:
    #     q(n) = W∫cos(πnf)df (0->1) = Wf sin(πnf)/πnf
    # integrated over each f1->f2 pair (i.e., value at f2 - value at f1).
    n = np.arange(numtaps)[:, np.newaxis, np.newaxis]
    q = np.dot(np.diff(np.sinc(bands * n) * bands, axis=2)[:, :, 0], weight)

    # Now we assemble our sum of Toeplitz and Hankel
    Q1 = toeplitz(q[:M + 1])
    Q2 = hankel(q[:M + 1], q[M:])
    Q = Q1 + Q2

    # Now for b(n) we have that:
    #     b(n) = 1/π ∫ W(ω)D(ω)cos(nω)dω (over 0->π)
    # Using our normalization ω=πf and with a constant weight W over each
    # interval and a linear term for D(ω) we get (over each f1->f2 interval):
    #     b(n) = W ∫ (mf+c)cos(πnf)df
    #          = f(mf+c)sin(πnf)/πnf + mf**2 cos(nπf)/(πnf)**2
    # integrated over each f1->f2 pair (i.e., value at f2 - value at f1).
    n = n[:M + 1]  # only need this many coefficients here
    # Choose m and c such that we are at the start and end weights
    m = (np.diff(desired, axis=1) / np.diff(bands, axis=1))
    c = desired[:, [0]] - bands[:, [0]] * m
    b = bands * (m * bands + c) * np.sinc(bands * n)
    # Use L'Hospital's rule here for cos(nπf)/(πnf)**2 @ n=0
    b[0] -= m * bands * bands / 2.
    b[1:] += m * np.cos(n[1:] * np.pi * bands) / (np.pi * n[1:])**2
    b = np.dot(np.diff(b, axis=2)[:, :, 0], weight)

    # Now we can solve the equation
    try:  # try the fast way
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always')
            a = solve(Q, b, assume_a="pos", check_finite=False)
        for ww in w:
            if (ww.category == LinAlgWarning
                    and str(ww.message).startswith('Ill-conditioned matrix')):
                raise LinAlgError(str(ww.message))
    except LinAlgError:  # in case Q is rank deficient
        # This is faster than pinvh, even though we don't explicitly use
        # the symmetry here. gelsy was faster than gelsd and gelss in
        # some non-exhaustive tests.
        a = lstsq(Q, b, lapack_driver='gelsy')[0]

    # make coefficients symmetric (linear phase)
    coeffs = np.hstack((a[:0:-1], 2 * a[0], a[1:]))
    return coeffs
Code Example #52
    def generate_block(self, k=None):
        from scipy.linalg import toeplitz
        r = self.block_initializer(size=(self.block_size, ), k=k)
        c = self.block_initializer(size=(self.block_size, ), k=k)
        block = toeplitz(r, c)
        return block
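For reference, a quick check (not from the source) of scipy's argument order: the first argument to toeplitz fills the first column and the second fills the first row, so the names r and c in the snippet above are swapped relative to scipy's (c, r) signature:

import numpy as np
from scipy.linalg import toeplitz

print(toeplitz(np.array([1, 2, 3]), np.array([1, 9, 8])))
# [[1 9 8]
#  [2 1 9]
#  [3 2 1]]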
Code Example #53
    def create_A_and_b_room2(self):
        """ Initializes the matrices A and b for room 2. 
            For room 2, b will change in every iteration, while A is CONSTANT """
        height = 2  # height of room 2
        M = int(round(height / self.dx)) - 1  # number of rows of nodes
        N = self.N  # number of cols of nodes
        size = M * N  # number of unknown nodes
        """ Create A """
        # The bulk of A is very close to a toeplitz matrix with 5 diagonals.
        first_row = np.zeros(size)
        first_row[0] = -4
        first_row[1] = 1
        first_row[N] = 1
        A = sl.toeplitz(first_row, first_row)

        # The two inner super- and subdiagonals of this toeplitz matrix need to
        # be modified. Specifically, every N:th element should be set to zero,
        # for a total of (M-1) times, since our grid has M rows, and only the
        # first element to be set to zero goes "outside" of the matrix A.
        #
        # SUB: first zero goes in row N.
        row = N
        for i in range(M - 1):
            A[row, row - 1] = 0
            row += N

        # SUPER: first zero goes in row N-1.
        row = N - 1
        for i in range(M - 1):
            A[row, row + 1] = 0
            row += N

        # [Building b].
        # Room 2 has 6 different (Dirichlet) boundaries. Of these, 2 change in every
        # iteration, while 4 are constant. Here we initialize b, considering only the
        # 4 constant boundary conditions, while the other 2 are considered in
        # update_b_room2(), called in every iteration in solve().
        b = np.zeros(size)

        # Upper boundary:
        b[:N] = -self.heater_temp

        # Lower boundary:
        b[-N:] = -self.window_temp

        # Upper left boundary:
        # Every N:th node is affected by the upper left boundary, and in total
        # N+1 nodes are affected. The first affected node is the 0:th node.
        index = 0
        for i in range(N + 1):
            b[index] -= self.wall_temp
            index += N

        # Lower right boundary:
        # Every N:th node is affected by the lower right boundary, and in total
        # N+1 nodes are affected. The first affected node is the (N^2+(N-1)):th node.
        index = N**2 + (N - 1)
        for i in range(N + 1):
            b[index] -= self.wall_temp
            index += N
        self.A = sp.csc_matrix(A, dtype=float).todense()
        self.b = b
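A small sanity check (illustrative only) of the banded structure built above: for an M×N grid of unknowns in row-major order, the five-diagonal Toeplitz matrix needs its inner sub- and superdiagonals zeroed wherever the ordering wraps from one grid row to the next, which is exactly what the two loops do.

import numpy as np
import scipy.linalg as sl

M, N = 2, 3                       # rows and columns of unknown nodes
size = M * N
first_row = np.zeros(size)
first_row[0], first_row[1], first_row[N] = -4, 1, 1
A = sl.toeplitz(first_row, first_row)

for row in range(N, size, N):     # zero the couplings that wrap a grid row
    A[row, row - 1] = 0
    A[row - 1, row] = 0
print(A)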
Code Example #54
    def computeWeights(self,
                       sources,
                       interferers,
                       R_n,
                       delay=0.03,
                       epsilon=5e-3):

        dist_mat = pra.distance(self.R, sources)
        s_time = dist_mat / pra.c
        s_dmp = 1. / (4 * np.pi * dist_mat)

        dist_mat = pra.distance(self.R, interferers)
        i_time = dist_mat / pra.c
        i_dmp = 1. / (4 * np.pi * dist_mat)

        offset = np.maximum(s_dmp.max(),
                            i_dmp.max()) / (np.pi * self.Fs * epsilon)
        t_min = np.minimum(s_time.min(), i_time.min())
        t_max = np.maximum(s_time.max(), i_time.max())

        s_time -= t_min - offset
        i_time -= t_min - offset
        Lh = int((t_max - t_min + 2 * offset) * float(self.Fs))

        if ((Lh - 1) > (self.M - 1) * self.Lg):
            import warnings
            wng = "Beamforming filters length (%d) are shorter than minimum required (%d)." % (
                self.Lg, Lh)
            warnings.warn(wng, UserWarning)

        # the channel matrix
        Lg = self.Lg
        L = self.Lg + Lh - 1
        H = np.zeros((Lg * self.M, 2 * L))

        for r in np.arange(self.M):

            hs = pra.lowPassDirac(s_time[r, :, np.newaxis], s_dmp[r, :,
                                                                  np.newaxis],
                                  self.Fs, Lh).sum(axis=0)
            row = np.pad(hs, ((0, L - len(hs))), mode='constant')
            col = np.pad(hs[:1], ((0, Lg - 1)), mode='constant')
            H[r * Lg:(r + 1) * Lg, 0:L] = toeplitz(col, row)

            hi = pra.lowPassDirac(i_time[r, :, np.newaxis], i_dmp[r, :,
                                                                  np.newaxis],
                                  self.Fs, Lh).sum(axis=0)
            row = np.pad(hi, ((0, L - len(hi))), mode='constant')
            col = np.pad(hi[:1], ((0, Lg - 1)), mode='constant')
            H[r * Lg:(r + 1) * Lg, L:2 * L] = toeplitz(col, row)

        # the constraint vector
        kappa = int(delay * self.Fs)
        #kappa = np.minimum(int(0.6*(Lh+Lg)), int(2*t_max*self.Fs))
        h = H[:, kappa]

        # We first assume the samples are uncorrelated
        R_xx = np.dot(H[:, :L], H[:, :L].T)
        K_nq = np.dot(H[:, L:], H[:, L:].T) + R_n

        # Compute the TD filters
        C = la.cho_factor(R_xx + K_nq, check_finite=False)
        g_val = la.cho_solve(C, h)

        g_val /= np.inner(h, g_val)
        self.filters = g_val.reshape((self.M, Lg))
        '''
        import matplotlib.pyplot as plt
        plt.figure()
        plt.subplot(2,1,1)
        plt.plot(np.arange(L)/float(self.Fs), np.dot(H[:,:L].T, g_val))
        plt.plot(np.arange(L)/float(self.Fs), np.dot(H[:,L:].T, g_val))
        plt.legend(('Channel of desired source','Channel of interferer'))
        plt.subplot(2,1,2)
        for m in np.arange(self.M):
            plt.plot(np.arange(Lh)/float(self.Fs), H[m*Lg,:Lh])
        '''

        # compute and return SNR
        num = np.inner(g_val.T, np.dot(R_xx, g_val))
        denom = np.inner(np.dot(g_val.T, K_nq), g_val)

        return num / denom
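The H blocks above are convolution matrices: left-multiplying toeplitz(col, row) built from an impulse response by a length-Lg filter is the same as convolving the filter with that response. A minimal check (illustrative values):

import numpy as np
from scipy.linalg import toeplitz

h = np.array([1.0, 0.5, 0.25])          # impulse response, Lh = 3
Lg = 4
L = Lg + len(h) - 1
row = np.pad(h, (0, L - len(h)), mode='constant')
col = np.pad(h[:1], (0, Lg - 1), mode='constant')
H = toeplitz(col, row)                  # shape (Lg, L)

g = np.array([0.3, -0.1, 0.2, 0.05])    # a length-Lg beamforming filter
print(np.allclose(g @ H, np.convolve(g, h)))   # True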
Code Example #55
    'pvalues': mdl.pvalues,
    'params': mdl.params
},
                  index=x_columns)

if 'const' in pp.sort_values('pvalues')[:s].index:
    sig_ind = sorted(np.argpartition(mdl.pvalues, s)[:s])
else:
    sig_ind = sorted(np.argpartition(mdl.pvalues, s - 1)[:(s - 1)]) + [0]

x_f_new = x_f[:N, sig_ind]
x_columns_new = x_columns[sig_ind]

y_f_new = y_f[:N]

cov_mat = toeplitz(0.5**np.arange(d - s))
np.random.seed(93)
x_f_new = np.hstack(
    (x_f_new, np.random.multivariate_normal(np.zeros(d - s), cov_mat, N)))
x_f_new[:, int(-(d - s) / 2):] = (x_f_new[:, int(-(d - s) / 2):] >= 0).astype(float)

x_m_new = x_f_new[:n0]
y_m = y_f_new[:n0]
logregcv = LogisticRegressionCV(cv=10,
                                scoring='neg_log_loss',
                                penalty='l1',
                                solver='saga',
                                fit_intercept=False,
                                max_iter=1000,
                                n_jobs=-1,
Code Example #56
def estimate_time_constant(Y,
                           sn,
                           p=None,
                           lags=5,
                           include_noise=False,
                           pixels=None):
    """
    Estimating global time constants for the dataset Y through the autocovariance function (optional).
    The function is no longer used in the standard setting of the algorithm since every trace has its own
    time constant.

    Args:
        Y: np.ndarray (2D)
            input movie data with time in the last axis

        sn: np.ndarray
            noise standard deviation estimated for each pixel

        p: positive integer
            order of AR process (must be provided; a common choice is 2)

        lags: positive integer
            number of lags in the past to consider for determining time constants. Default 5

        include_noise: Boolean
            Flag to include pre-estimated noise value when determining time constants. Default: False

        pixels: np.ndarray
            Restrict estimation to these pixels (e.g., remove saturated pixels). Default: All pixels

    Returns:
        g:  np.ndarray (p x 1)
            Discrete time constants
    """
    if p is None:
        raise Exception("You need to define p")
    if pixels is None:
        pixels = np.arange(old_div(np.size(Y), np.shape(Y)[-1]))

    from scipy.linalg import toeplitz
    npx = len(pixels)
    lags += p
    XC = np.zeros((npx, 2 * lags + 1))
    for j in range(npx):
        XC[j, :] = np.squeeze(axcov(np.squeeze(Y[pixels[j], :]), lags))

    gv = np.zeros(npx * lags)
    if not include_noise:
        XC = XC[:, np.arange(lags - 1, -1, -1)]
        lags -= p

    A = np.zeros((npx * lags, p))
    for i in range(npx):
        if not include_noise:
            A[i * lags + np.arange(lags), :] = toeplitz(
                np.squeeze(XC[i, np.arange(p - 1, p + lags - 1)]),
                np.squeeze(XC[i, np.arange(p - 1, -1, -1)]))
        else:
            A[i * lags + np.arange(lags), :] = toeplitz(
                np.squeeze(XC[i, lags + np.arange(lags)]),
                np.squeeze(XC[i, lags +
                              np.arange(p)])) - (sn[i]**2) * np.eye(lags, p)
            gv[i * lags + np.arange(lags)] = np.squeeze(XC[i, lags + 1:])

    if not include_noise:
        gv = XC[:, p:].T
        gv = np.squeeze(np.reshape(gv, (np.size(gv), 1), order='F'))

    g = np.dot(np.linalg.pinv(A), gv)

    return g
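The stacked per-pixel system above is a least-squares version of the Yule-Walker equations. A minimal single-trace sketch (illustrative, ignoring the noise correction) recovering AR(2) coefficients from the empirical autocovariance:

import numpy as np
from scipy.linalg import toeplitz

np.random.seed(0)
g_true = np.array([1.2, -0.35])          # stable AR(2) coefficients
T, p, lags = 5000, 2, 5
y = np.zeros(T)
for t in range(p, T):                    # y_t = g1*y_{t-1} + g2*y_{t-2} + e_t
    y[t] = g_true @ y[t - p:t][::-1] + np.random.randn()

# biased empirical autocovariance at lags 0 .. lags+p-1
xc = np.array([y[:T - k] @ y[k:] / T for k in range(lags + p)])

# Yule-Walker: Toeplitz system in the autocovariances, solved by least squares
A = toeplitz(xc[p - 1:p + lags - 1], xc[p - 1::-1])
g = np.linalg.pinv(A) @ xc[p:p + lags]
print(g, g_true)                         # estimates should be close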
Code Example #57
File: propagate_filter.py  Project: mniehus/PyDynamic
def FIRuncFilter(y,
                 sigma_noise,
                 theta,
                 Utheta=None,
                 shift=0,
                 blow=None,
                 kind="corr"):
    """Uncertainty propagation for signal y and uncertain FIR filter theta

    Parameters
    ----------
        y: np.ndarray
            filter input signal
        sigma_noise: float or np.ndarray
            float:    standard deviation of white noise in y
            1D-array: interpretation depends on kind
        theta: np.ndarray
            FIR filter coefficients
        Utheta: np.ndarray
            covariance matrix associated with theta
        shift: int
            time delay of filter output signal (in samples)
        blow: np.ndarray
            optional FIR low-pass filter
        kind: string
            only meaningful in combination with isinstance(sigma_noise, numpy.ndarray)
            "diag": point-wise standard uncertainties of non-stationary white noise
            "corr": single-sided autocovariance of stationary (colored/correlated) noise (default)

    Returns
    -------
        x: np.ndarray
            FIR filter output signal
        ux: np.ndarray
            point-wise uncertainties associated with x


    References
    ----------
        * Elster and Link 2008 [Elster2008]_

    .. seealso:: :mod:`PyDynamic.deconvolution.fit_filter`

    """

    Ntheta = len(theta)  # FIR filter size
    # filterOrder = Ntheta - 1   # FIR filter order

    if not isinstance(Utheta,
                      np.ndarray):  # handle case of zero uncertainty filter
        Utheta = np.zeros((Ntheta, Ntheta))

    # check which case of sigma_noise is necessary
    if isinstance(sigma_noise, float):
        sigma2 = sigma_noise**2

    elif isinstance(sigma_noise, np.ndarray):
        if kind == "diag":
            sigma2 = sigma_noise**2
        elif kind == "corr":
            sigma2 = sigma_noise
        else:
            raise ValueError("unknown kind of sigma_noise")

    else:
        raise ValueError(
            "sigma_noise is neither of type float nor numpy.ndarray.")

    if isinstance(blow, np.ndarray):  # calculate low-pass filtered signal and propagate noise

        if isinstance(sigma2, float):
            Bcorr = np.correlate(blow, blow,
                                 'full')  # len(Bcorr) == 2*Ntheta - 1
            ycorr = sigma2 * Bcorr[len(blow) - 1:]  # only the upper half of the correlation is needed

            # trim / pad to length Ntheta
            ycorr = trimOrPad(ycorr, Ntheta)
            Ulow = toeplitz(ycorr)

        elif isinstance(sigma2, np.ndarray):

            if kind == "diag":
                # [Leeuw1994] (covariance matrix of ARMA errors in closed form) can be used to derive this formula
                # The given "blow" corresponds to a MA(q)-process.
                # Going through the calculations of Leeuw, but assuming
                # that E(vv^T) is a diagonal matrix with non-identical elements,
                # the covariance matrix V becomes (see Leeuw:corollary1)
                # V = N * SP * N^T + M * S * M^T
                # N, M are defined as in the paper
                # and SP is the covariance of input-noise prior to the observed time-interval
                # (SP needs to be available len(blow) timesteps into the past. Here it is
                # assumed that SP is constant with the first value of sigma2)

                # V needs to be extended to cover Ntheta-1 timesteps more into the past
                sigma2_extended = np.append(sigma2[0] * np.ones((Ntheta - 1)),
                                            sigma2)

                N = toeplitz(blow[1:][::-1], np.zeros_like(sigma2_extended)).T
                M = toeplitz(trimOrPad(blow, len(sigma2_extended)),
                             np.zeros_like(sigma2_extended))
                SP = np.diag(sigma2[0] * np.ones_like(blow[1:]))
                S = np.diag(sigma2_extended)

                # Ulow is to be sliced from V, see below
                V = N.dot(SP).dot(N.T) + M.dot(S).dot(M.T)

            elif kind == "corr":

                # adjust the length of sigma2 to fit blow and theta;
                # this either crops (unused) information or appends zero-information
                # note1: this is the reason why Ulow will have dimension (Ntheta x Ntheta) without further ado

                # calculate Bcorr
                Bcorr = np.correlate(blow, blow, "full")

                # pad or crop length of sigma2, then reflect some part to the left and invert the order
                # [0 1 2 3 4 5 6 7] --> [0 0 0 7 6 5 4 3 2 1 0 1 2 3]
                sigma2 = trimOrPad(sigma2, len(blow) + Ntheta - 1)
                sigma2_reflect = np.pad(sigma2, (len(blow) - 1, 0),
                                        mode="reflect")

                ycorr = np.correlate(
                    sigma2_reflect, Bcorr, mode="valid"
                )  # an earlier version used convolve; this should make no difference as Bcorr is symmetric
                Ulow = toeplitz(ycorr)

        xlow, _ = lfilter(blow, 1.0, y, zi=y[0] * lfilter_zi(blow, 1.0))

    else:  # if blow is not provided
        if isinstance(sigma2, float):
            Ulow = np.eye(Ntheta) * sigma2

        elif isinstance(sigma2, np.ndarray):

            if kind == "diag":
                # V needs to be extended to cover Ntheta timesteps more into the past
                sigma2_extended = np.append(sigma2[0] * np.ones((Ntheta - 1)),
                                            sigma2)

                # Ulow is to be sliced from V, see below
                V = np.diag(
                    sigma2_extended
                )  #  this is not Ulow, same thing as in the case of a provided blow (see above)

            elif kind == "corr":
                Ulow = toeplitz(trimOrPad(sigma2, Ntheta))

        xlow = y

    # apply FIR filter to calculate best estimate in accordance with GUM
    x, _ = lfilter(theta, 1.0, xlow, zi=xlow[0] * lfilter_zi(theta, 1.0))
    x = np.roll(x, -int(shift))

    # add dimension to theta, otherwise transpose won't work
    if len(theta.shape) == 1:
        theta = theta[:, np.newaxis]

    # handle diag-case, where Ulow needs to be sliced from V
    if kind == "diag":
        # UncCov needs to be calculated inside in its own for-loop
        # V has dimension (len(sigma2) + Ntheta) * (len(sigma2) + Ntheta) --> slice a fitting Ulow of dimension (Ntheta x Ntheta)
        UncCov = np.zeros((len(sigma2)))

        for k in range(len(sigma2)):
            Ulow = V[k:k + Ntheta, k:k + Ntheta]
            UncCov[k] = np.squeeze(
                theta.T.dot(Ulow.dot(theta)) + np.abs(
                    np.trace(Ulow.dot(Utheta))))  # static part of uncertainty

    else:
        UncCov = theta.T.dot(Ulow.dot(theta)) + np.abs(
            np.trace(Ulow.dot(Utheta)))  # static part of uncertainty

    unc = np.zeros_like(y)
    for m in range(Ntheta, len(xlow)):
        XL = xlow[m:m - Ntheta:-1,
                  np.newaxis]  # extract necessary part from input signal
        unc[m] = XL.T.dot(Utheta.dot(XL))  # apply formula from paper
    ux = np.sqrt(np.abs(UncCov + unc))
    ux = np.roll(ux, -int(shift))  # correct for delay

    return x, ux.flatten()  # flatten in case that we still have 2D array
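All the kind="corr" branches above rely on the fact that a stationary process has a symmetric Toeplitz covariance matrix whose first row is its autocovariance sequence. A standalone illustration (made-up autocovariance values, not from PyDynamic):

import numpy as np
from scipy.linalg import toeplitz

acov = np.array([1.0, 0.6, 0.2, 0.05])   # single-sided autocovariance
U = toeplitz(acov)                       # U[i, j] = acov[|i - j|]

# draw correlated noise with this covariance and check empirically
L_chol = np.linalg.cholesky(U)
noise = L_chol @ np.random.randn(4, 200000)
print(np.round(np.cov(noise), 2))        # should be close to U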
Code Example #58
File: separation.py  Project: shincling/TDAAv2
def _project_images(reference_sources, estimated_source, flen, G=None):
    """Least-squares projection of estimated source on the subspace spanned by
    delayed versions of reference sources, with delays between 0 and flen-1.
    Passing G as all zeros will populate the G matrix and return it so it can
    be passed into the next call to avoid recomputing G (this only works
    if permutations are not computed).
    """
    nsrc = reference_sources.shape[0]
    nsampl = reference_sources.shape[1]
    nchan = reference_sources.shape[2]
    reference_sources = np.reshape(np.transpose(reference_sources, (2, 0, 1)),
                                   (nchan * nsrc, nsampl), order='F')

    # computing coefficients of least squares problem via FFT
    # zero padding and FFT of input data
    reference_sources = np.hstack((reference_sources,
                                   np.zeros((nchan * nsrc, flen - 1))))
    estimated_source = \
        np.hstack((estimated_source.transpose(), np.zeros((nchan, flen - 1))))
    n_fft = int(2 ** np.ceil(np.log2(nsampl + flen - 1.)))
    sf = scipy.fftpack.fft(reference_sources, n=n_fft, axis=1)
    sef = scipy.fftpack.fft(estimated_source, n=n_fft)

    # inner products between delayed versions of reference_sources
    if G is None:
        saveg = False
        G = np.zeros((nchan * nsrc * flen, nchan * nsrc * flen))
        for i in range(nchan * nsrc):
            for j in range(i + 1):
                ssf = sf[i] * np.conj(sf[j])
                ssf = np.real(scipy.fftpack.ifft(ssf))
                ss = toeplitz(np.hstack((ssf[0], ssf[-1:-flen:-1])),
                              r=ssf[:flen])
                G[i * flen: (i + 1) * flen, j * flen: (j + 1) * flen] = ss
                G[j * flen: (j + 1) * flen, i * flen: (i + 1) * flen] = ss.T
    else:  # avoid recomputing G (only works if no permutation is desired)
        saveg = True  # return G
        if np.all(G == 0):  # only compute G if passed as 0
            G = np.zeros((nchan * nsrc * flen, nchan * nsrc * flen))
            for i in range(nchan * nsrc):
                for j in range(i + 1):
                    ssf = sf[i] * np.conj(sf[j])
                    ssf = np.real(scipy.fftpack.ifft(ssf))
                    ss = toeplitz(np.hstack((ssf[0], ssf[-1:-flen:-1])),
                                  r=ssf[:flen])
                    G[i * flen: (i + 1) * flen, j * flen: (j + 1) * flen] = ss
                    G[j * flen: (j + 1) * flen, i * flen: (i + 1) * flen] = ss.T

    # inner products between estimated_source and delayed versions of
    # reference_sources
    D = np.zeros((nchan * nsrc * flen, nchan))
    for k in range(nchan * nsrc):
        for i in range(nchan):
            ssef = sf[k] * np.conj(sef[i])
            ssef = np.real(scipy.fftpack.ifft(ssef))
            D[k * flen: (k + 1) * flen, i] = \
                np.hstack((ssef[0], ssef[-1:-flen:-1])).transpose()

    # Computing projection
    # Distortion filters
    try:
        C = np.linalg.solve(G, D).reshape(flen, nchan * nsrc, nchan, order='F')
    except np.linalg.LinAlgError:
        C = np.linalg.lstsq(G, D)[0].reshape(flen, nchan * nsrc, nchan,
                                             order='F')
    # Filtering
    sproj = np.zeros((nchan, nsampl + flen - 1))
    for k in range(nchan * nsrc):
        for i in range(nchan):
            sproj[i] += fftconvolve(C[:, k, i].transpose(),
                                    reference_sources[k])[:nsampl + flen - 1]
    # return G only if it was passed in
    if saveg:
        return sproj, G
    else:
        return sproj
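The Gram matrix G above exploits the fact that inner products between delayed, zero-padded copies of two signals form a Toeplitz block computable from one FFT-based cross-correlation. A minimal check of that identity (illustrative):

import numpy as np
import scipy.fftpack
from scipy.linalg import toeplitz

rng = np.random.default_rng(0)
flen, nsampl = 4, 32
s1 = rng.standard_normal(nsampl)
s2 = rng.standard_normal(nsampl)

# direct computation: inner products between delayed, zero-padded copies
def delayed(s):
    out = np.zeros((flen, nsampl + flen - 1))
    for d in range(flen):
        out[d, d:d + nsampl] = s
    return out

G_direct = delayed(s1) @ delayed(s2).T

# FFT-based computation, mirroring _project_images
n_fft = int(2 ** np.ceil(np.log2(nsampl + flen - 1)))
ssf = np.real(scipy.fftpack.ifft(scipy.fftpack.fft(s1, n=n_fft) *
                                 np.conj(scipy.fftpack.fft(s2, n=n_fft))))
G_fft = toeplitz(np.hstack((ssf[0], ssf[-1:-flen:-1])), r=ssf[:flen])
print(np.allclose(G_direct, G_fft))      # True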
Code Example #59
    def test_scalar_03(self):
        c = array([1, 2, 3])
        t = toeplitz(c, array([1]))
        assert_array_equal(t, [[1], [2], [3]])
Code Example #60
def generate_mai(m: int,
                 k: int,
                 N: int,
                 M: int,
                 sparse_variables_1: float = 0,
                 sparse_variables_2: float = 0,
                 signal: float = 1,
                 structure: str = 'identity',
                 sigma: float = 0.9,
                 decay: float = 0.5):
    mean = np.zeros(N + M)
    cov = np.zeros((N + M, N + M))
    p = np.arange(0, k)
    p = decay**p
    # Covariance Bit
    if structure == 'identity':
        cov_1 = np.eye(N)
        cov_2 = np.eye(M)
    elif structure == 'gaussian':
        x = np.linspace(-1, 1, N)
        x_tile = np.tile(x, (N, 1))
        mu_tile = np.transpose(x_tile)
        dn = 2 / (N - 1)
        cov_1 = gaussian(x_tile, mu_tile, sigma, dn)
        cov_1 /= cov_1.max()
        x = np.linspace(-1, 1, M)
        x_tile = np.tile(x, (M, 1))
        mu_tile = np.transpose(x_tile)
        dn = 2 / (M - 1)
        cov_2 = gaussian(x_tile, mu_tile, sigma, dn)
        cov_2 /= cov_2.max()
    elif structure == 'toeplitz':
        c = np.arange(0, N)
        c = sigma**c
        cov_1 = linalg.toeplitz(c, c)
        c = np.arange(0, M)
        c = sigma**c
        cov_2 = linalg.toeplitz(c, c)
    elif structure == 'random':
        if N < 2000:
            cov_1 = np.random.rand(N, N)
            U, S, V = np.linalg.svd(cov_1.T @ cov_1)
            cov_1 = U @ (1.0 + np.diag(np.random.rand(N))) @ V
        else:
            cov_1 = np.random.rand(N, N)
            cov_1 = cov_1.T @ cov_1
        if M < 2000:
            cov_2 = np.random.rand(M, M)
            U, S, V = np.linalg.svd(cov_2.T @ cov_2)
            cov_2 = U @ (1.0 + np.diag(np.random.rand(M))) @ V
        else:
            cov_2 = np.random.rand(M, M)
            cov_2 = cov_2.T @ cov_2

    cov[:N, :N] = cov_1
    cov[N:, N:] = cov_2
    del cov_1
    del cov_2
    """
    # Sparse Bits
    if sparse_variables_1 > 0:
        sparse_cov_1 = csr_matrix(cov_1)
        cov_1 = sparse_cov_1.copy()
    else:
        sparse_cov_1 = cov_1.copy()
    """
    up = np.random.rand(N, k) - 0.5
    for _ in range(k):
        if sparse_variables_1 > 0:
            if sparse_variables_1 < 1:
                sparse_variables_1 = np.ceil(sparse_variables_1 *
                                             N).astype('int')
            first = np.random.randint(N - sparse_variables_1)
            up[:first, _] = 0
            up[(first + sparse_variables_1):, _] = 0
        up[:, _] /= np.sqrt((up[:, _].T @ cov[:N, :N] @ up[:, _]))
        """
        if _ < (k - 1) and sparse_variables_1 == 0:
            proj = csr_matrix(up[:, _]).T @ csr_matrix(up[:, _])
            cov_1 = (identity(up[:, _].shape[0]) - proj) @ cov_1 @ (identity(up[:, _].shape[0]) - proj)
        """
    """
    # Elimination step:
    for _ in range(k):
        mat_1 = up.T @ sparse_cov_1 @ up
        up[:, (_ + 1):] -= np.outer(up[:, _], mat_1[_, (_ + 1):])
    """
    """
    if sparse_variables_2 > 0:
        sparse_cov_2 = csr_matrix(cov_2)
        cov_2 = sparse_cov_2.copy()
    else:
        sparse_cov_2 = cov_2.copy()
    """
    vp = np.random.rand(M, k) - 0.5
    for _ in range(k):
        if sparse_variables_2 > 0:
            if sparse_variables_2 < 1:
                sparse_variables_2 = np.ceil(sparse_variables_2 *
                                             M).astype('int')
            first = np.random.randint(M - sparse_variables_2)
            vp[:first, _] = 0
            vp[(first + sparse_variables_2):, _] = 0
        vp[:, _] /= np.sqrt((vp[:, _].T @ cov[N:, N:] @ vp[:, _]))
        """
        if _ < (k - 1) and sparse_variables_2 == 0:
            proj = csr_matrix(vp[:, _]).T @ csr_matrix(vp[:, _])
            cov_2 = (identity(vp[:, _].shape[0]) - proj) @ cov_2 @ (identity(vp[:, _].shape[0]) - proj)
        """
    """
    for _ in range(k):
        mat_2 = vp.T @ sparse_cov_2 @ vp
        vp[:, (_ + 1):] -= np.outer(vp[:, _], mat_2[_, (_ + 1):])
    """

    cross = np.zeros((N, M))
    for _ in range(k):
        cross += signal * p[_] * np.outer(up[:, _], vp[:, _])
    # Cross Bit
    cross = cov[:N, :N] @ cross @ cov[N:, N:]

    cov[N:, :N] = cross.T
    cov[:N, N:] = cross
    del cross

    if cov.shape[0] < 2000:
        X = np.random.multivariate_normal(mean, cov, m)
    else:
        X = np.zeros((m, N + M))
        chol = np.linalg.cholesky(cov)
        for _ in range(m):
            X[_, :] = chol_sample(mean, chol)
    Y = X[:, N:]
    X = X[:, :N]

    return X, Y, up, vp, cov
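A hedged usage sketch (arbitrary parameter values; assumes the module-level imports of the original source, e.g. numpy as np and scipy's linalg, are in scope): draw 500 samples of two views sharing k = 2 correlated components with Toeplitz within-view covariance.

X, Y, up, vp, cov = generate_mai(m=500, k=2, N=10, M=8,
                                 structure='toeplitz', sigma=0.7)
print(X.shape, Y.shape)                  # (500, 10) (500, 8)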