Example #1
    def log_p(self, params, data, constants, key, x):
        # Computes the log-likelihood for H and K knowing all other parameters for PaleoModel
        past, present, future = constants['past'](), constants['present'](), constants['future']()
        if key == 'H':
            T = np.concatenate((params['T13']()[:past], data['T2']()))
            T = np.array([np.ones((present)), T])
            M = data['RP']() - np.dot(params['alpha'](), T)
            cov = self.get_toeplitz(present, x)
            slogdet = self.get_logdet(present, x)
            v = solve_toeplitz(cov, M)
            return -1 / 2 * slogdet - 1 / (2 * params['sigma_p']()**2) * np.inner(M, v)

        if key == 'K':
            T = np.concatenate(
                (params['T13']()[:past], data['T2'](), params['T13']()[past:]))
            F = np.array(
                [np.ones((future)), data['S'](), data['V'](), data['C']()]).T
            cov = self.get_toeplitz(future, x)
            slogdet = self.get_logdet(future, x)
            M = T - np.dot(params['beta'](), F.T)
            v = solve_toeplitz(cov, M)
            return -1 / 2 * slogdet - 1 / (2 * params['sigma_T']()**2) * np.inner(M, v)
Example #2
 def time_step():
     """compute one iterate, update state and iter_count"""
     for l in range(1, L):
         u = self.state[l, 1:-1]
         # solve first equation:
         Cu = C.dot(u)
         w = linalg.solve_toeplitz(B_data, Cu)
         Cw = C.dot(w)
         v = linalg.solve_toeplitz(B_data, Cw)
         self.state[l, 1:-1] = v
     return
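
The time_step method above solves the same Toeplitz system twice per grid line, i.e. it computes v = B^-1 C B^-1 C u. A minimal self-contained sketch of that pattern with made-up banded operators (B_first_col, C_mat and u are illustrative names, not from the original class):

import numpy as np
from scipy import linalg

n = 8
B_first_col = np.r_[2.0, -0.5, np.zeros(n - 2)]              # symmetric Toeplitz B, given by its first column
C_mat = linalg.toeplitz(np.r_[1.0, 0.25, np.zeros(n - 2)])   # a second banded Toeplitz operator
u = np.random.rand(n)

w = linalg.solve_toeplitz(B_first_col, C_mat.dot(u))         # solve B w = C u
v = linalg.solve_toeplitz(B_first_col, C_mat.dot(w))         # solve B v = C w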
Example #3
    def parameters_changed(self):
        # maximize -0.5 * (y . K^-1 y) - 0.5 log |K|
        # gradient wrt t is 0.5 tr((a a^T - K^-1) dK/dt), where a = K^-1 y
        K_top = self.kern.from_dist(self.dists)
        a = la.solve_toeplitz(K_top, self.Y)
        all_grad = self.kern.kernel_gradient(self.dists)
        likelihood_grad = np.zeros(len(all_grad))
        for i, grad in enumerate(all_grad):
            dKdt = la.toeplitz(grad)
            Kinv_dKdt = la.solve_toeplitz(K_top, dKdt)
            aaT_dKdt = np.outer(a, dKdt.dot(a))
            trace = np.trace(aaT_dKdt - Kinv_dKdt)
            likelihood_grad[i] = 0.5 * trace

        self.kern.update_gradient(likelihood_grad)
Example #4
def full_kernel(v, i, ksize, full_output=False):
    '''
    Calculates the full kernel from the recording v and the input
    current i. The last ksize steps of i should be null.
    ksize = size of the resulting kernel
    full_output = returns K,v0 if True (v0 is the resting potential)
    '''
    # Calculate the correlation vector <v(n)i(n-k)>
    # and the autocorrelation vector <i(n)i(n-k)>
    vi = zeros(ksize)
    ii = zeros(ksize)
    vref = mean(v)  # taking <v> as the reference potential simplifies the formulas
    iref = mean(i)
    v_corrected = v - vref
    i_corrected = i - iref
    for k in range(ksize):
        vi[k] = mean(v_corrected[k:] * i_corrected[:len(i) - k])
        ii[k] = mean(i_corrected[k:] * i_corrected[:len(i) - k])
    K = linalg.solve_toeplitz(ii, vi)
    #K = levinson_durbin(ii, vi) # obsolete
    if full_output:
        v0 = vref - iref * sum(K)
        return K, v0
    else:
        return K
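
full_kernel assumes zeros, mean and scipy's linalg are imported at module level. As a hedged, self-contained illustration of the same idea, the sketch below recovers a known FIR kernel from a white-noise input current; the signal model and numbers are made up for illustration:

import numpy as np
from scipy.linalg import solve_toeplitz

rng = np.random.default_rng(0)
true_K = np.array([0.5, 0.3, 0.1])                       # kernel to recover
i = rng.standard_normal(10000)                           # white-noise input current
v = np.convolve(i, true_K, mode='full')[:len(i)] + 1.0   # "recording" with resting potential 1.0

ksize = 3
ic = i - i.mean()
vc = v - v.mean()
vi = np.array([np.mean(vc[k:] * ic[:len(i) - k]) for k in range(ksize)])
ii = np.array([np.mean(ic[k:] * ic[:len(i) - k]) for k in range(ksize)])
K = solve_toeplitz(ii, vi)                               # should be close to true_K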
Example #5
def LS_Filter_Toeplitz(ref, srv, nlag, return_filter=False):
    '''Least squares clutter removal for passive radar. Computes filter taps
    by assuming that the autocorrelation matrix of the reference channel signal 
    is Hermitian and Toeplitz. Faster than the direct matrix inversion method,
    but inaccurate if the assumptions are violated (i.e. if the input signal is
    not wide sense stationary).
    
    
    Parameters:
    ref: reference signal
    srv: surveillance signal
    nlag: filter length in samples
    return_filter: (bool) option to return filter taps as well as cleaned signal
    
    Returns:
    y: surveillance signal with clutter removed
    w: (optional) least squares filter taps

    '''
    rs = np.roll(ref, -10)  # shift the reference so the clutter filter can be slightly noncausal (cf. the peek parameter in Example #27)

    # compute the first column of the autocorrelation matrix of ref
    c = xcorr(rs, rs, 0, nlag)

    # compute the cross-correlation of ref and srv
    r = xcorr(srv, rs, 0, nlag)

    # solve the Toeplitz least squares problem
    w = solve_toeplitz(c, r)

    if return_filter:
        return srv - np.convolve(rs, w, mode='same'), w
    else:
        return srv - np.convolve(rs, w, mode='same')
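
LS_Filter_Toeplitz depends on an external xcorr helper that is not shown here. The hedged sketch below reproduces the same Toeplitz least-squares step with NumPy only; the corr helper is a stand-in whose conventions are assumed, and the toy clutter model (a delayed, scaled copy of the reference) is made up:

import numpy as np
from scipy.linalg import solve_toeplitz

rng = np.random.default_rng(1)
ref = rng.standard_normal(4096) + 1j * rng.standard_normal(4096)
srv = 0.8 * np.roll(ref, 3) + 0.05 * (rng.standard_normal(4096) + 1j * rng.standard_normal(4096))

def corr(x, y, nlag):
    # lag-k correlations <x(n) conj(y(n-k))>, k = 0 .. nlag-1
    return np.array([np.vdot(y[:len(y) - k], x[k:]) for k in range(nlag)]) / len(x)

nlag = 8
c = corr(ref, ref, nlag)    # first column of the Hermitian Toeplitz autocorrelation matrix
r = corr(srv, ref, nlag)    # cross-correlation of surveillance and reference
w = solve_toeplitz(c, r)    # least squares clutter filter taps
cleaned = srv - np.convolve(ref, w, mode='full')[:len(srv)]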
Example #6
 def x_star(lambd):
     lambd += 1e-14  # avoid numerical issues
     # lhs_inv = np.dot(V.T / (s + np.repeat(lambd, n_times_atom)), U.T)
     # return np.dot(lhs_inv, rhs)
     lhs_c_copy = lhs_c.copy()
     lhs_c_copy[0] += lambd
     return linalg.solve_toeplitz(lhs_c_copy, rhs)
Example #7
def test_native_list_arguments():
    c = [1, 2, 4, 7]
    r = [1, 3, 9, 12]
    y = [5, 1, 4, 2]
    actual = solve_toeplitz((c, r), y)
    desired = solve(toeplitz(c, r=r), y)
    assert_allclose(actual, desired)
Example #8
    def solve(self, b):
        """Solve self.dot(x) = b for x.

        Parameters
        ----------
        b: array_like

        Returns
        -------
        x: np.ndarray

        Examples
        --------
        >>> toep_op = Toeplitz([3, 2, 1])
        >>> toep_op.solve([1, 3, 2])
        array([-0.625,  1.5  , -0.125])
        >>> toep_op2 = Toeplitz([3, 2, 1], [6, 5, 4])
        >>> toep_op2.solve([1, 2, 3])
        array([-6.,  3.,  1.])

        See Also
        --------
        scipy.linalg.solve_toeplitz
        """
        diagonal_index = self.shape[0] - 1
        first_row = self._data[diagonal_index:]
        first_col = self._data[diagonal_index::-1]
        return solve_toeplitz((first_col, first_row), b)
Example #9
def test_native_list_arguments():
    c = [1,2,4,7]
    r = [1,3,9,12]
    y = [5,1,4,2]
    actual = solve_toeplitz((c,r), y)
    desired = solve(toeplitz(c, r=r), y)
    assert_allclose(actual, desired)
Example #10
def pade(signal, length, timestep, freqs, single_point = False):			# Adapted from J. J. Goings' code (https://github.com/jjgoings/pade)
	"""
	Approximate Fourier transform by pade approximants
	"""
	
	N = length//2

	d = -signal[N+1:2*N]

	try:
		from scipy.linalg import toeplitz, solve_toeplitz
		# Instead, form G = (c,r) as toeplitz
		#c = signal[N:2*N-1]
		#r = np.hstack((signal[1],signal[N-1:1:-1]))
		b = solve_toeplitz((signal[N:2*N-1], np.hstack((signal[1],signal[N-1:1:-1]))), d, check_finite=False)

	except (ImportError, np.linalg.linalg.LinAlgError) as e:  
		# OLD CODE: sometimes more stable
		# G[k,m] = signal[N - m + k] for m,k in range(1,N)
		G = signal[N + np.arange(1, N)[:,None] - np.arange(1, N)]
		b = np.linalg.solve(G, d)

	b = np.hstack((1,b))
	a = np.dot(np.tril(toeplitz(signal[0:N])),b)
	p = np.poly1d(a)
	q = np.poly1d(b)

	if single_point:
		return p, q
	else:
		W = np.exp(-1j * freqs * timestep * 2 * np.pi)
		return p(W)/q(W)
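
The commented-out lines above describe how the dense matrix G from the "old code" collapses to a (first column, first row) pair for solve_toeplitz. A quick hedged check of that equivalence on random data (not part of the original routine; note that solve_toeplitz takes the diagonal from c[0] and ignores r[0]):

import numpy as np
from scipy.linalg import toeplitz, solve_toeplitz

rng = np.random.default_rng(0)
signal = rng.standard_normal(64)
N = len(signal) // 2
d = -signal[N + 1:2 * N]

G = signal[N + np.arange(1, N)[:, None] - np.arange(1, N)]   # dense G[k, m] = signal[N - m + k]
c = signal[N:2 * N - 1]                                      # first column of G
r = np.hstack((signal[1], signal[N - 1:1:-1]))               # first row (its leading element is ignored)

print(np.allclose(toeplitz(c, r=r), G))                                # True
print(np.allclose(solve_toeplitz((c, r), d), np.linalg.solve(G, d)))   # True for well-conditioned data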
Example #11
def simul_beta2(model, noise_H, noise_K):
    '''
    Simulate beta in Gibbs sampler knowing all other parameters
    '''
    params, data, constants = model.params, model.data, model.constants
    # return params['beta']()
    past, future = constants['past'](), constants['future']()
    T = np.concatenate(
        (params['T13']()[:past], data['T2'](), params['T13']()[past:]))
    cov_top = noise_K.get_toeplitz(future, params['K']())
    M = np.tril(toeplitz(cov_top))
    cov_top = cov_top / (1 - params['K']()**2)
    S = data['S']()
    V = data['V']()
    C = data['C']()
    v = np.array([
        np.dot(M, np.ones((future))),
        np.dot(M, S),
        np.dot(M, V),
        np.dot(M, C)
    ])
    b = solve_toeplitz(cov_top, v.T)

    P1 = np.dot(v, b)

    omega = np.linalg.inv(1 / params['sigma_T']()**2 * P1 + np.identity(4))
    mu = (1 / params['sigma_T']()**2) * omega.dot(T.dot(b))
    a = np.random.multivariate_normal(mean=mu, cov=omega)
    return a
Example #12
def spec2lsf(spec, lsf_order=30):

    NFFT = 2 * (spec.shape[0] - 1)
    n_frames = spec.shape[1]

    p = lsf_order

    lsf = np.zeros((n_frames, lsf_order), dtype=np.float64)
    spec_rec = np.zeros(spec.shape)

    for i, spec_vec in enumerate(spec.T):

        # floor reconstructed spectrum
        spec_vec = np.maximum(spec_vec, 1e-9)

        # squared magnitude 2-sided spectrum
        twoside = np.r_[spec_vec, np.flipud(spec_vec[1:-1])]
        twoside = np.square(twoside)
        r = np.fft.ifft(twoside)
        r = r.real

        # levinson-durbin
        a = LA.solve_toeplitz(r[0:p], r[1:p + 1])
        a = np.r_[1.0, -1.0 * a]

        lsf[i, :] = poly2lsf(a)

        # reconstructed all-pole spectrum
        w, H = freqz(b=1.0, a=a, worN=NFFT, whole=True)
        spec_rec[:, i] = np.abs(H[:(NFFT // 2 + 1)])

    return lsf, spec_rec
Example #13
def computeLpcFast(signal, order):
    y = np.correlate(signal, signal, 'full')
    y = y[(len(signal) - 1):]
    xlpc = lpc_solve.solve_toeplitz(y[0:order], -y[1:order + 1])
    xlpc = np.append(1, xlpc)
    gg = y[0] + np.sum(xlpc * y[1:order + 2])
    return xlpc, gg
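
computeLpcFast relies on a project-specific lpc_solve module, but scipy.linalg.solve_toeplitz takes the same (first column, right-hand side) arguments for this Yule-Walker step. A hedged sanity check against a known AR(2) process; the coefficients and lengths below are arbitrary:

import numpy as np
from scipy.linalg import solve_toeplitz

rng = np.random.default_rng(0)
a_true = np.array([0.75, -0.5])                    # AR(2): x[n] = 0.75 x[n-1] - 0.5 x[n-2] + e[n]
x = np.zeros(20000)
e = rng.standard_normal(20000)
for n in range(2, len(x)):
    x[n] = a_true[0] * x[n - 1] + a_true[1] * x[n - 2] + e[n]

order = 2
y = np.correlate(x, x, 'full')[len(x) - 1:]        # autocorrelation at lags 0 .. len(x)-1
xlpc = solve_toeplitz(y[0:order], -y[1:order + 1])
print(-xlpc)                                       # should be close to a_true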
Example #14
def simul_s_T2(model, noise_H, noise_K):
    '''
    Simulate sigma_T in Gibbs sampler knowing all other parameters
    '''
    params, data, constants = model.params, model.data, model.constants
    # return params['sigma_T']()
    past, future = constants['past'](), constants['future']()
    T = np.concatenate(
        (params['T13']()[:past], data['T2'](), params['T13']()[past:]))
    cov_top = noise_K.get_toeplitz(future, params['K']())
    M = np.tril(toeplitz(cov_top))
    cov_top = cov_top / (1 - params['K']()**2)
    S = data['S']()
    V = data['V']()
    C = data['C']()
    v = np.array([
        np.dot(M, np.ones((future))),
        np.dot(M, S),
        np.dot(M, V),
        np.dot(M, C)
    ])
    P1 = np.dot(params['beta'](), v)
    b = solve_toeplitz(cov_top, T - P1)
    P2 = np.dot((T - P1).T, b)
    q = 2 + future / 2
    r = 0.1 + 1 / 2 * P2
    return np.sqrt(1 / np.random.gamma(shape=q, scale=1 / r))
Example #15
 def cl(frame: np.array):
     phi = __compute_phi(frame, m)
     c = phi[:-1]
     r = phi[:-1]
     b = phi[1:]
     a = sl.solve_toeplitz((-c, -r), b)
     assert len(a) == m
     return a
Example #16
def pade(time, dipole):
    damp_const = 100.0
    dipole = np.asarray(dipole) - dipole[0]

    stepsize = time[1] - time[0]
    #print dipole
    damp = np.exp(-(stepsize * np.arange(len(dipole))) / float(damp_const))
    dipole *= damp
    M = len(dipole)
    N = int(np.floor(M / 2))

    #print "N = ", N
    num_pts = 20000
    if N > num_pts:
        N = num_pts
    #print "Trimmed points to: ", N

    # G and d are (N-1) x (N-1)
    # d[k] = -dipole[N+k] for k in range(1,N)
    d = -dipole[N + 1:2 * N]

    try:
        from scipy.linalg import toeplitz, solve_toeplitz
    except ImportError:
        print("You'll need SciPy version >= 0.17.0")

    try:
        # Instead, form G = (c,r) as toeplitz
        #c = dipole[N:2*N-1]
        #r = np.hstack((dipole[1],dipole[N-1:1:-1]))
        b = solve_toeplitz((dipole[N:2*N-1],\
            np.hstack((dipole[1],dipole[N-1:1:-1]))),d,check_finite=False)
    except np.linalg.LinAlgError:
        # OLD CODE: sometimes more stable
        # G[k,m] = dipole[N - m + k] for m,k in range(1,N)
        G = dipole[N + np.arange(1, N)[:, None] - np.arange(1, N)]
        b = np.linalg.solve(G, d)

    # Now make b Nx1 where b0 = 1
    b = np.hstack((1, b))

    # b[m]*dipole[k-m] for k in range(0,N), for m in range(k)
    a = np.dot(np.tril(toeplitz(dipole[0:N])), b)
    p = np.poly1d(a)
    q = np.poly1d(b)

    # If you want energies greater than 2*27.2114 eV, you'll need to change
    # the default frequency range to something greater.

    #frequency = np.arange(0.00,2.0,0.00005)
    frequency = np.arange(0.3, 0.75, 0.0002)

    W = np.exp(-1j * frequency * stepsize)

    fw = p(W) / q(W)

    return fw, frequency
Example #17
 def calc_lpc(self, frame):
     n = self.settings.N_TAPS
     fft_frame = np.fft.fft(frame)
     acorr = np.real(np.fft.ifft(fft_frame * np.conj(fft_frame)))
     b = acorr[1:n]
     p = solve_toeplitz((acorr[0:n - 1], acorr[0:n - 1]), b)
     p0 = np.array([1])
     P = np.concatenate((p0, -1 * p))
     return P
Example #18
 def update(self, x):
     self.x = np.roll(self.x, 1)
     self.x[0] = x
     self.mu = (1.0 - self.r) * self.mu + self.r * x
     self.C = (1.0 - self.r) * self.C
     self.C += self.r * (x - self.mu) * (self.x - self.mu)
     A = solve_toeplitz(self.C[:-1], self.C[1:])
     pred = np.dot(A, self.x[1:] - self.mu) + self.mu
     self.sigma = (1.0 - self.r) * self.sigma + self.r * (x - pred)**2
     return -np.log(norm.pdf(x=x, loc=pred, scale=self.sigma**0.5))
Example #19
def simul_T3(model, noise_H, noise_K):
    '''
    Simulate T13 in Gibbs sampler knowing all other parameters
    '''
    params, data, constants = model.params, model.data, model.constants
    past, present, future = constants['past'](), constants['present'](), constants['future']()
    cov_top_H = noise_K.get_toeplitz(present, params['H']())
    cov_top_K = noise_K.get_toeplitz(future, params['K']())
    M_H = np.tril(toeplitz(cov_top_H))
    M_K = np.tril(toeplitz(cov_top_K))
    cov_top_H = cov_top_H / (1-params['H']()**2)
    cov_top_K = cov_top_K / (1-params['K']()**2)
    S = params['S']()
    V = params['V']()
    C = params['C']()
    v = np.array([np.dot(M_K, np.ones((future))), np.dot(M_K,S), np.dot(M_K,V), np.dot(M_K,C)])

    inv_covK = np.linalg.inv(toeplitz(cov_top_K))
    inv_covH = np.linalg.inv(toeplitz(cov_top_H))

    P1 = np.pad(np.dot(M_H.T, np.dot(inv_covH, M_H)), pad_width = (0,future-present), mode='constant')
    omega = np.linalg.inv((params['alpha']()[1]/params['sigma_p']())**2*P1 + 1/params['sigma_T']()**2 * inv_covK)
    a = solve_toeplitz(cov_top_K, np.dot(params['beta'](),v))
    b = np.pad(np.dot(M_H.T, solve_toeplitz(cov_top_H, data['RP']() - params['alpha']()[0]*np.dot(M_H, np.ones(present)))), pad_width=(0,future-present), mode='constant')
    mu = 1/params['sigma_T']()**2*np.dot(omega, a) + params['alpha']()[1]/params['sigma_p']()**2*np.dot(omega, b)
    # T = mu + np.dot(np.linalg.cholesky(omega), np.random.randn(len(mu)))
    # print(params['alpha']()[1]/params['sigma_p']()**2*np.dot(omega, b))

    M1 = omega[past:present, past:present]
    M1_inv = np.linalg.inv(M1)
    M2 = np.concatenate((omega[:past, past:present], omega[present:future, past:present]), axis = 0)
    M3 = np.concatenate((np.concatenate((omega[:past,:past],omega[:past, present:future]), axis = 1), np.concatenate((omega[present:future,:past], omega[present:future,present:future]), axis = 1)), axis = 0)
    M3_inv = np.linalg.inv(M3)

    # Bloc 1-3
    new_mean13 = np.concatenate((mu[:past], mu[present:future])) + np.dot(M2,np.dot(M1_inv, data['T2']() - mu[past:present]))
    new_cov13 = M3 - np.dot(M2, np.dot(M1_inv, M2.T))
    T13 = new_mean13 + np.dot(np.linalg.cholesky(new_cov13), np.random.randn(len(new_mean13)))
    # Bloc 2
    new_mean2 = mu[past:present] + np.dot(M2.T,np.dot(M3_inv, T13 - np.concatenate((mu[:past], mu[present:future]))))
    new_cov2 = M1 - np.dot(M2.T, np.dot(M3_inv, M2))
    T2 = new_mean2 + np.dot(np.linalg.cholesky(new_cov2), np.random.randn(len(new_mean2)))
    return T13, T2
Example #20
 def log_likelihood(self):
     K_top = self.kern.from_dist(self.dists)
     KinvY = la.solve_toeplitz(K_top, self.Y)
     # Prevent slight negative eigenvalues from roundoff.
     sign, logdet = np.linalg.slogdet(
         la.toeplitz(K_top) + 1e-10 * np.identity(len(K_top)))
     print(self.dists)
     print(K_top)
     assert sign > 0, (sign, logdet)
     return -0.5 * self.Y.dot(KinvY) - 0.5 * logdet
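
A hedged, standalone version of the same marginal log-likelihood, with a toy exponential covariance on a regular grid standing in for self.kern (the length scale, grid and jitter are assumptions, not the original model):

import numpy as np
import scipy.linalg as la

dists = np.arange(64, dtype=float)          # pairwise distances on a regular 1-D grid
K_top = np.exp(-dists / 5.0)                # first column of a stationary (Toeplitz) covariance
rng = np.random.default_rng(0)
Y = rng.standard_normal(64)

KinvY = la.solve_toeplitz(K_top, Y)
sign, logdet = np.linalg.slogdet(la.toeplitz(K_top) + 1e-10 * np.identity(len(K_top)))
assert sign > 0
print(-0.5 * Y.dot(KinvY) - 0.5 * logdet)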
Example #21
def getL(h, z):

    # https://www.hindawi.com/journals/mpe/2014/490568/
    r, v = getR(h, len(z))
    s, b = np.linalg.slogdet(r)
    if s != 1:
        1 / 0  # deliberately raise if the log-determinant sign is not positive
    ll = -np.dot(z, solve_toeplitz(v, z)) / 2
    ll -= b / 2
    return ll
Example #22
def lpc_atc(sig, order, levinson=True, stable=True):
    """Linear predictive coding using the autocorrelation method.

    Parameters
    ----------
    sig: array_like
        (Usually windowed) time-domain sequence.
    order: int
        LPC order.
    levinson: bool, optional
        Use Levinson-Durbin recursion? Default to True.
    stable: bool, optional
        Enforce stability for pole locations? Default to True.

    Returns
    -------
    alphas: numpy.ndarray
        `order`-point LPC coefficients: [a1, a2, ..., ap].
        The all-pole filter can be reconstructed from the diff eq:
            y[n] = G*x[n] + a1*y[n-1] + a2*y[n-2] + ... + ap*y[n-p]
    gain: float
        Filter gain.

    """
    rxx = xcorr(sig)
    if levinson:  # use levinson-durbin recursion
        try:
            alphas = solve_toeplitz(rxx[:order], rxx[1:order + 1])
        except np.linalg.LinAlgError:
            print("Singular matrix!! Adding small value to phi[0].")
            print(rxx[:order])
            rxx[0] += 1e-9
            alphas = solve_toeplitz(rxx[:order], rxx[1:order + 1])
    else:  # solve by direct inversion.
        alphas = inv(toeplitz(rxx[:order])).dot(rxx[1:order + 1])
    if stable and (not lpc_is_stable(alphas)):
        print("Unstable LPC detected!! Reflecting back to unit circle.")
        alphas = lpc2stable(alphas)

    gain = np.sqrt(rxx[0] - rxx[1:order + 1].dot(alphas))

    return alphas, gain
Example #23
def test_reflection_coeffs():
    # check that the partial solutions are given by the reflection
    # coefficients

    random = np.random.RandomState(1234)
    y_d = random.randn(10)
    y_z = random.randn(10) + 1j
    reflection_coeffs_d = [1]
    reflection_coeffs_z = [1]
    for i in range(2, 10):
        reflection_coeffs_d.append(solve_toeplitz(y_d[:(i - 1)], b=y_d[1:i])[-1])
        reflection_coeffs_z.append(solve_toeplitz(y_z[:(i - 1)], b=y_z[1:i])[-1])

    y_d_concat = np.concatenate((y_d[-2:0:-1], y_d[:-1]))
    y_z_concat = np.concatenate((y_z[-2:0:-1].conj(), y_z[:-1]))
    _, ref_d = levinson(y_d_concat, b=y_d[1:])
    _, ref_z = levinson(y_z_concat, b=y_z[1:])

    assert_allclose(reflection_coeffs_d, ref_d[:-1])
    assert_allclose(reflection_coeffs_z, ref_z[:-1])
Example #24
def computeLpcFast(signal, order, keepreal=True):
    y = np.fft.ifft(
        np.fft.fft(signal, len(signal)) *
        np.conj(np.fft.fft(signal, len(signal))))
    if keepreal:
        y = np.real(y)
    xlpc = lpc_solve.solve_toeplitz(y[0:order], -y[1:order + 1])
    xlpc = np.append(1, xlpc)
    gg = y[0] + np.sum(xlpc * y[1:order + 2])

    return xlpc, gg
Example #25
def toeplitz_solve(a, b, c):
    # For some reason, `sla.solve_toeplitz` sometimes fails with a `ValueError`, saying
    # that the buffer source array is read-only. We resolve this issue by copying the
    # inputs....
    # TODO: Resolve this properly.
    a = np.copy(a)
    b = np.copy(b)
    c = np.copy(c)
    res_dtype = promote_dtype_of_tensors(a, b, c)
    row = np.concatenate((a[:1], b))  # First row of the Toeplitz matrix
    return sla.solve_toeplitz((a, row), c).astype(res_dtype)
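
To illustrate the (first column, first row) convention that toeplitz_solve builds: the returned x satisfies T x = c for the dense Toeplitz matrix whose first column is a and whose first row is np.concatenate((a[:1], b)). A small hedged check with arbitrary numbers, calling scipy directly:

import numpy as np
import scipy.linalg as sla

a = np.array([4.0, 1.0, 0.5])      # first column (a[0] is the diagonal entry)
b = np.array([2.0, 0.25])          # first row, excluding the diagonal entry
c = np.array([1.0, 2.0, 3.0])      # right-hand side

row = np.concatenate((a[:1], b))
x = sla.solve_toeplitz((a, row), c)
print(np.allclose(sla.toeplitz(a, r=row) @ x, c))   # True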
Example #26
def test_reflection_coeffs():
    # check that the partial solutions are given by the reflection
    # coefficients

    random = np.random.RandomState(1234)
    y_d = random.randn(10)
    y_z = random.randn(10) + 1j
    reflection_coeffs_d = [1]
    reflection_coeffs_z = [1]
    for i in range(2, 10):
        reflection_coeffs_d.append(solve_toeplitz(y_d[:(i-1)], b=y_d[1:i])[-1])
        reflection_coeffs_z.append(solve_toeplitz(y_z[:(i-1)], b=y_z[1:i])[-1])

    y_d_concat = np.concatenate((y_d[-2:0:-1], y_d[:-1]))
    y_z_concat = np.concatenate((y_z[-2:0:-1].conj(), y_z[:-1]))
    _, ref_d = levinson(y_d_concat, b=y_d[1:])
    _, ref_z = levinson(y_z_concat, b=y_z[1:])

    assert_allclose(reflection_coeffs_d, ref_d[:-1])
    assert_allclose(reflection_coeffs_z, ref_z[:-1])
Example #27
def LS_Filter_Toeplitz(refChannel,
                       srvChannel,
                       filterLen,
                       peek=10,
                       return_filter=False):
    '''Block least squares adaptive filter. Computes filter coefficients using
    scipy's solve_toeplitz function. This assumes the autocorrelation matrix of
    refChannel is Hermitian and Toeplitz (i.e. the reference signal is
    wide sense stationary). Faster than the direct matrix inversion method but
    inaccurate if the assumptions are violated. 
    
    Parameters:
        refChannel:     Array containing the reference channel signal
        srvChannel:     Array containing the surveillance channel signal
        filterLen:   Length of the least squares filter (in samples)
        peek:           Number of noncausal filter taps. Set to zero for a 
                        causal filter. If nonzero, clutter estimates can depend 
                        on future values of the reference signal (this helps 
                        sometimes)
        return_filter:  Boolean indicating whether to return the filter taps

    Returns:
        srvChannelFiltered: Surveillance channel signal with clutter removed
        filterTaps:     (optional) least squares filter taps

    '''

    if refChannel.shape != srvChannel.shape:
        raise ValueError(f'''Input vectors must have the same length - 
        got {refChannel.shape} and {srvChannel.shape}''')

    # shift reference channel because for some reason the filtering works
    # better when you allow the clutter filter to be noncausal
    refChannelShift = np.roll(refChannel, -1 * peek)

    # compute the first column of the autocorrelation matrix of ref
    autocorrRef = xcorr(refChannelShift, refChannelShift, 0,
                        filterLen + peek - 1)

    # compute the cross-correlation of ref and srv
    xcorrSrvRef = xcorr(srvChannel, refChannelShift, 0, filterLen + peek - 1)

    # solve the Toeplitz least squares problem
    filterTaps = solve_toeplitz(autocorrRef, xcorrSrvRef)

    # compute clutter signal and remove from surveillance Channel
    clutter = np.convolve(refChannelShift, filterTaps, mode='full')
    clutter = clutter[0:srvChannel.shape[0]]
    srvChannelFiltered = srvChannel - clutter

    if return_filter:
        return srvChannelFiltered, filterTaps
    else:
        return srvChannelFiltered
Example #28
def computeLpcFast(signal,order):
    #y=np.correlate(signal,signal,'full')
    #y=y[(len(signal)-1):]
    y=np.fft.ifft(np.fft.fft(signal,len(signal))*np.conj(np.fft.fft(signal,len(signal))))
    y=np.real(y)  
    xlpc=lpc_solve.solve_toeplitz(y[0:order],-y[1:order+1])
    xlpc=np.append(1,xlpc)
    gg=y[0]+np.sum(xlpc*y[1:order+2])
    
    #xlpc=np.random.rand(order)
    #gg=1
    return xlpc, gg
Example #29
def test_multiple_rhs():
    random = np.random.RandomState(1234)
    c = random.randn(4)
    r = random.randn(4)
    for offset in [0, 1j]:
        for yshape in ((4, ), (4, 3), (4, 3, 2)):
            y = random.randn(*yshape) + offset
            actual = solve_toeplitz((c, r), b=y)
            desired = solve(toeplitz(c, r=r), y)
            assert_equal(actual.shape, yshape)
            assert_equal(desired.shape, yshape)
            assert_allclose(actual, desired)
Example #30
def test_multiple_rhs():
    random = np.random.RandomState(1234)
    c = random.randn(4)
    r = random.randn(4)
    for offset in [0, 1j]:
        for yshape in ((4,), (4, 3), (4, 3, 2)):
            y = random.randn(*yshape) + offset
            actual = solve_toeplitz((c,r), b=y)
            desired = solve(toeplitz(c, r=r), y)
            assert_equal(actual.shape, yshape)
            assert_equal(desired.shape, yshape)
            assert_allclose(actual, desired)
Example #31
    def log_p2(self, params, data, constants, key, x):
        # Computes the log-likelihood for H and K knowing all other parameters for PaleoModel2
        past, present, future = constants['past'](), constants['present'](), constants['future']()
        if key == 'H':
            T = np.concatenate((params['T13']()[:past], data['T2']()))
            cov_top = self.get_toeplitz(present, x)
            M = np.tril(toeplitz(cov_top))
            cov_top = cov_top / (1 - x**2)
            u = np.array([np.dot(M, np.ones(len(T))), np.dot(M, T)])

            P1 = np.dot(params['alpha'](), u)
            b = solve_toeplitz(cov_top, data['RP']() - P1)
            P2 = np.dot((data['RP']() - P1).T, b)

            slogdet = self.get_logdet(present, x)
            return -1 / 2 * slogdet - 1 / (2 * params['sigma_p']()**2) * P2

        if key == 'K':
            T = np.concatenate(
                (params['T13']()[:past], data['T2'](), params['T13']()[past:]))
            cov_top = self.get_toeplitz(future, x)
            M = np.tril(toeplitz(cov_top))
            cov_top = cov_top / (1 - x**2)
            S = data['S']()
            V = data['V']()
            C = data['C']()
            v = np.array([
                np.dot(M, np.ones((future))),
                np.dot(M, S),
                np.dot(M, V),
                np.dot(M, C)
            ])
            P1 = np.dot(params['beta'](), v)
            b = solve_toeplitz(cov_top, T - P1)
            P2 = np.dot((T - P1).T, b)

            slogdet = self.get_logdet(future, x)
            return -1 / 2 * slogdet - 1 / (2 * params['sigma_T']()**2) * P2
Example #32
def test_solve_equivalence():
    # For toeplitz matrices, solve_toeplitz() should be equivalent to solve().
    random = np.random.RandomState(1234)
    for n in (1, 2, 3, 10):
        c = random.randn(n)
        if random.rand() < 0.5:
            c = c + 1j * random.randn(n)
        r = random.randn(n)
        if random.rand() < 0.5:
            r = r + 1j * random.randn(n)
        y = random.randn(n)
        if random.rand() < 0.5:
            y = y + 1j * random.randn(n)

        # Check equivalence when both the column and row are provided.
        actual = solve_toeplitz((c, r), y)
        desired = solve(toeplitz(c, r=r), y)
        assert_allclose(actual, desired)

        # Check equivalence when the column is provided but not the row.
        actual = solve_toeplitz(c, b=y)
        desired = solve(toeplitz(c), y)
        assert_allclose(actual, desired)
Example #33
def lpcoeff(speech_frame, model_order):
    # ----------------------------------------------------------
    # (1) Compute Autocorrelation Lags
    # ----------------------------------------------------------

    R = correlate(speech_frame, speech_frame)
    R = R[len(speech_frame) - 1:len(speech_frame) + model_order]
    # ----------------------------------------------------------
    # (2) Levinson-Durbin
    # ----------------------------------------------------------
    lpparams = np.ones((model_order + 1))
    lpparams[1:] = solve_toeplitz(R[0:-1], -R[1:])

    return (lpparams, R)
Example #34
def test_solve_equivalence():
    # For toeplitz matrices, solve_toeplitz() should be equivalent to solve().
    random = np.random.RandomState(1234)
    for n in (1, 2, 3, 10):
        c = random.randn(n)
        if random.rand() < 0.5:
            c = c + 1j * random.randn(n)
        r = random.randn(n)
        if random.rand() < 0.5:
            r = r + 1j * random.randn(n)
        y = random.randn(n)
        if random.rand() < 0.5:
            y = y + 1j * random.randn(n)

        # Check equivalence when both the column and row are provided.
        actual = solve_toeplitz((c,r), y)
        desired = solve(toeplitz(c, r=r), y)
        assert_allclose(actual, desired)

        # Check equivalence when the column is provided but not the row.
        actual = solve_toeplitz(c, b=y)
        desired = solve(toeplitz(c), y)
        assert_allclose(actual, desired)
Example #35
def test_unstable():
    # this is a "Gaussian Toeplitz matrix", as mentioned in Example 2 of
    # I. Gohberg, T. Kailath and V. Olshevsky "Fast Gaussian Elimination with
    # Partial Pivoting for Matrices with Displacement Structure"
    # Mathematics of Computation, 64, 212 (1995), pp 1557-1576
    # which can be unstable for levinson recursion.

    # other fast toeplitz solvers such as GKO or Burg should be better.
    random = np.random.RandomState(1234)
    n = 100
    c = 0.9 ** (np.arange(n)**2)
    y = random.randn(n)

    solution1 = solve_toeplitz(c, b=y)
    solution2 = solve(toeplitz(c), y)

    assert_allclose(solution1, solution2)
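
As the comments in test_unstable note, Levinson recursion can lose accuracy on this Gaussian Toeplitz matrix. A hedged way to compare the fast and dense routes is to look at the residual norms directly (a sketch, not part of the test suite):

import numpy as np
from scipy.linalg import solve_toeplitz, toeplitz, solve

n = 100
c = 0.9 ** (np.arange(n) ** 2)
y = np.random.RandomState(1234).randn(n)
T = toeplitz(c)

x_lev = solve_toeplitz(c, b=y)     # Levinson-Durbin
x_lu = solve(T, y)                 # dense LU factorization
print(np.linalg.norm(T @ x_lev - y), np.linalg.norm(T @ x_lu - y))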
Example #36
    def estimate(self, freqs, amps, order=None):
        if order is None:
            order = self.order
        omegas = 2*np.pi*freqs/self.sr
        amps = np.abs(amps)
        nharm = len(amps)

        # imaginary part of z variable
        ejw = np.exp(-1j*omegas * np.arange(0,order+1))
        inv_ejw = np.exp(1j*omegas * np.arange(0,order+1))

        # target autocorr matrix
        r = 1/nharm*np.real(amps**2*inv_ejw)
        rmx_inv = sla.inv(sla.toeplitz(r))

        # initial guess (LPC)
        use_r = r[:order]
        a = sla.solve_toeplitz(r[:-1],-r[1:])
Example #37
    def pade_tx(self,dipole_direction='x',spectra='abs',damp_const=5500,
        num_pts=10000):
        # num_pts: number of points to sample for pade transformation

        if (spectra.lower() == 'abs') or (spectra.lower() == 'power'):  
            if dipole_direction.lower() == 'x':
                dipole = self.electricDipole.x
                kick_strength = self.electricField.x[0]
            elif dipole_direction.lower() == 'y':
                dipole = self.electricDipole.y
                kick_strength = self.electricField.y[0]
            elif dipole_direction.lower() == 'z':
                dipole = self.electricDipole.z
                kick_strength = self.electricField.z[0]
            else:
                print("Not a valid direction for the dipole! Try: x,y,z ")
        elif spectra.lower() == 'ecd':
            if dipole_direction.lower() == 'x':
                dipole = self.magneticDipole.x
                kick_strength = self.electricField.x[0]
            elif dipole_direction.lower() == 'y':
                dipole = self.magneticDipole.y
                kick_strength = self.electricField.y[0]
            elif dipole_direction.lower() == 'z':
                dipole = self.magneticDipole.z
                kick_strength = self.electricField.z[0]
            else:
                print("Not a valid direction for the dipole! Try: x,y,z ")
        else: 
            print("Not a valid spectra choice")

        if np.isclose(kick_strength,0.0):
            if dipole_direction.lower() == 'x':
                kick_strength = max(self.electricField.x)
            elif dipole_direction.lower() == 'y':
                kick_strength = max(self.electricField.y)
            elif dipole_direction.lower() == 'z':
                kick_strength = max(self.electricField.z)
            if np.isclose(kick_strength,0.0):
                print("Kick strength = 0. Make sure you FFT'd the correct direction")
                sys.exit(0)
            print("It looks like you are not perturbing the field at time = 0")
            print("so we are taking the maximum of the electric field instead")
            print("This may not be the functionality you want.")
 

        # skip is integer to skip every n-th value
        # skip = 1 would not skip any values, but skip = 10 would only
        # consider every tenth value
        skip = 1 
        dipole = dipole - dipole[0]
        dipole = dipole[::skip]
        damp = np.exp(-(self.time-self.time[0])/float(damp_const))
        damp = damp[::skip]
        dipole = dipole * damp

        timestep = skip*(self.time[2] - self.time[1])
        M = len(dipole)
        N = int(np.floor(M / 2))

        print("N = ", N)
        if N > num_pts:
            N = num_pts
        print("Trimmed points to: ", N)

        # G and d are (N-1) x (N-1)
        # d[k] = -dipole[N+k] for k in range(1,N)
        d = -dipole[N+1:2*N] 

        # Old code, which works with regular Ax=b linear solver. 
        # G[k,m] = dipole[N - m + k] for m,k in range(1,N)
        #G = dipole[N + np.arange(1,N)[:,None] - np.arange(1,N)]
        #b = solve(G,d,check_finite=False)

        # Toeplitz linear solver using Levinson recursion
        # Should be O(n^2), and seems to work well, but if you get strange
        # results you may want to switch to regular linear solver which is much
        # more stable.
        try:
            from scipy.linalg import toeplitz, solve_toeplitz
        except ImportError:
            print("You'll need SciPy version >= 0.17.0")
            
        # Instead, form G = (c,r) as toeplitz
        #c = dipole[N:2*N-1]
        #r = np.hstack((dipole[1],dipole[N-1:1:-1]))
        b = solve_toeplitz((dipole[N:2*N-1],\
            np.hstack((dipole[1],dipole[N-1:1:-1]))),d,check_finite=False)
      
        # Now make b Nx1 where b0 = 1 
        b = np.hstack((1,b)) 

        # b[m]*dipole[k-m] for k in range(0,N), for m in range(k) 
        a = np.dot(np.tril(toeplitz(dipole[0:N])),b)

        p = np.poly1d(a)
        q = np.poly1d(b)

        # If you want energies greater than 2*27.2114 eV, you'll need to change
        # the default frequency range to something greater.
        self.frequency = np.arange(0,2,0.000025)
        W = np.exp(-1j*self.frequency*timestep)

        fw_re = np.real(p(W)/q(W))
        fw_im = np.imag(p(W)/q(W))

        if np.any(np.isinf(self.frequency)) or np.any(np.isnan(self.frequency)):
            print("Check your dT: frequency contains NaNs and/or Infs!")
            sys.exit(0)

        if spectra.lower() == 'abs':
            # old normalization: -((4.0*self.frequency*fw_im)/(kick_strength*damp_const))
            self.fourier = \
                ((4.0*self.frequency*np.pi*fw_im)/(3.0*137*kick_strength))