Example #1
File: test_LMS.py  Project: RJTK/LMS
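These snippets are extracted from their source files, so the imports live at the top of test_LMS.py / test_RLS.py rather than inside the functions. A minimal preamble that would make them self-contained is sketched below; the aliases (notably `gaussian` for scipy.stats.norm) and the project-local names are inferred from how they are used, not copied from the repository.

# Assumed preamble for the examples below (aliases and local module
# names are inferred from usage, not taken verbatim from RJTK/LMS):
import math
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import lfilter
from scipy.stats import norm as gaussian   #gaussian.rvs(...) below

# Project-local pieces, assumed importable from the repository:
# LMS, LMS_Normalized, LMS_ZA, LMS_RZA, RLS, tvfilt,
# system_identification_setup, one_step_pred_setup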
def system_identification1():
  '''
  Runs an example of LMS filtering for 1-step prediction on a WSS
  process.  We plot the convergence of the filter weights to the
  "correct" AR parameters.  This is essentially doing system
  identification.
  '''
  np.random.seed(2718)

  N = 5000 #Length of data
  mu = .02 #Step size
  p = 2 #Filter order

  #Filter for generating d(n)
  b = [1.]
  a = [1, -0.1, -0.8, 0.2]
  sv2 = .25 #Innovations noise variance

  #scale specifies standard deviation sqrt(sv2)
  v = gaussian.rvs(size = N, scale = math.sqrt(sv2)) #Innovations
  d = lfilter(b, a, v) #Desired process

  #Initialize the LMS filter, then get a function closure
  #implementing system identification
  F = LMS(mu = mu, p = p) #Vanilla
#  F = LMS_Normalized(p = p, beta = 0.02) #Normalized
#  F = LMS_Sparse(p = p, mu = mu, g = 1.) #Sparse
  ff_fb = system_identification_setup(F)

  #Run the data through the filter and collect the weight estimates.
  #Pay attention to the offsets.  Internally, d_hat[0] is a prediction
  #of d[1]; we implicitly predict d[0] = 0
  w = np.array([ff_fb(di) for di in d])

  plt.plot(range(N), w[:,0], linewidth = 2, label = '$w[0]$')
  plt.plot(range(N), w[:,1], linewidth = 2, label = '$w[1]$')
  plt.plot(range(N), w[:,2], linewidth = 2, label = '$w[2]$')
  plt.hlines(-a[1], 0, N, linestyle = ':', label = '$-a[1]$')
  plt.hlines(-a[2], 0, N, linestyle = ':', label = '$-a[2]$')
  plt.hlines(-a[3], 0, N, linestyle = ':', label = '$-a[3]$')
  plt.legend()
  plt.ylim((-.5, 1))
  plt.xlabel('$n$')
  plt.ylabel('$w$')
  plt.title('System Identification')
  plt.show()
  return
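For reference, the update that the vanilla `LMS(mu, p)` filter above is expected to perform is the standard stochastic-gradient step w ← w + μ·e(n)·x(n), where x(n) holds the most recent samples of d and e(n) is the 1-step prediction error. The sketch below is an independent illustration of that recursion, not the project's `LMS` class; the number of taps and the bookkeeping done inside `system_identification_setup` are assumptions.

import numpy as np

def lms_weight_history(d, mu=0.02, n_taps=3):
  '''
  Minimal LMS 1-step predictor: returns the weight trajectory
  (len(d) x n_taps).  Illustrative sketch only.
  '''
  w = np.zeros(n_taps)            #Filter weights
  x = np.zeros(n_taps)            #Regressor: newest sample first
  W = np.zeros((len(d), n_taps))  #Weight history
  for n, dn in enumerate(d):
    e = dn - w @ x                #1-step prediction error
    w = w + mu*e*x                #LMS (stochastic gradient) update
    x = np.roll(x, 1)
    x[0] = dn                     #Shift in the newest sample
    W[n] = w
  return W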
Example #2
File: test_LMS.py  Project: RJTK/LMS
def system_identification2():
  '''
  Runs an example of sparse LMS filtering for 1-step prediction on a
  WSS process.  We plot the convergence of the filter weights to the
  "correct" AR parameters.  This is essentially doing system
  identification.

  The point of this is to compare sparse vs. non-sparse LMS.
  '''
  np.random.seed(2718)

  N = 5000 #Length of data
  mu = .005 #Step size
  p = 9 #Filter order

  #Filter for generating d(n)
  b = [1.]
  a = [1, -0.1, 0., 0., 0.3, 0., 0.2, 0., 0., 0., -0.3]
  sv2 = .25 #Innovations noise variance

  #scale specifies standard deviation sqrt(sv2)
  v = gaussian.rvs(size = N, scale = math.sqrt(sv2)) #Innovations
  d = lfilter(b, a, v) #Desired process

  #Initialize the LMS filter, then get a function closure
  #implementing system identification
#  F = LMS_ZA(p = p, mu = mu, g = 0.01) #Sparse
  F = LMS_RZA(p = p, mu = mu, g = 0.05, eps = 10) #Reweighted Sparse
  ff_fb = system_identification_setup(F)

  #Run the data through the filter and collect the weight estimates.
  #Pay attention to the offsets.  Internally, d_hat[0] is a prediction
  #of d[1]; we implicitly predict d[0] = 0
  w = np.array([ff_fb(di) for di in d])

  #One trace per tap; dotted lines mark the true coefficients -a[i + 1]
  for i in range(p + 1):
    plt.plot(range(N), w[:, i], linewidth = 2)
    plt.hlines(-a[i + 1], 0, N, linestyle = ':')

  plt.ylim((-.5, 1))
  plt.xlabel('$n$')
  plt.ylabel('$w$')
  plt.title('Sparse System Identification')
  plt.show()
  return
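The reweighted zero-attractor variant used above (`LMS_RZA`) augments the plain LMS step with a sparsity-promoting term that pulls small weights toward zero while leaving large weights nearly untouched, which is why it identifies the mostly-zero AR coefficients of this process more cleanly. A single update, written independently of the project's class (the mapping of `g` and `eps` onto the attractor strength and reweighting constant is an assumption), looks roughly like this:

import numpy as np

def rza_lms_update(w, x, d_n, mu=0.005, g=0.05, eps=10.0):
  '''
  One reweighted zero-attractor LMS (RZA-LMS) update (sketch).
  w: current weights, x: regressor, d_n: new desired sample.
  '''
  e = d_n - w @ x                                 #Prediction error
  attractor = g*np.sign(w)/(1.0 + eps*np.abs(w))  #Pull small taps to 0
  return w + mu*e*x - attractor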
Example #3
File: test_RLS.py  Project: RJTK/LMS
def tracking_example3():
  '''
  Shows the RLS algorithm tracking a process with time-varying statistics.
  '''
  np.random.seed(314)

  N = 5000 #Length of data
  lmbda = .98 #Forgetting factor
  p_RLS = 4 #RLS Filter order

  #Filter for generating d(n)
  p1_f = 1.35 #Oscillation frequency of pole 1's position
  p2_fA = 1.1 #Oscillation frequency of pole 2's amplitude
  p2_fP = 1.5 #Rotation frequency of pole 2's phase
  def A(tau, t):
    p1 = 0.25 + 0.75*np.sin(2*np.pi*t*p1_f) #pole 1 position (real)
    p2A = 0.15 + abs(0.75*np.sin(2*np.pi*t*p2_fA)) #pole 2 amplitude
    p2P = np.exp(2*np.pi*1j*t*p2_fP) #pole 2 phase
    p2 = p2A*p2P
    p3 = p2.conj() #pole 3 is the conjugate of pole 2
    a = np.poly([p1, p2, p3]) #AR coefficients from the pole locations
    return -a[tau + 1]

  b = [1., -.2, 0.8]

  B = lambda tau, t : b[tau]
  p = 3
  q = 1

  sv2 = 0.25 #Innovations noise variance

  #Track a time varying process
  t = np.linspace(0, 1, N)
  f = 2
#  v = 4*np.sin(2*np.pi*f*t) + \
#      gaussian.rvs(size = N, scale = math.sqrt(sv2)) #Innovations
  v = gaussian.rvs(size = N, scale = math.sqrt(sv2))
#  v = pareto.rvs(beta, size = N, scale = math.sqrt(sv2))
#  d = lfilter(b, a, v) #Desired process
  d = tvfilt(B, A, v, p, q, t)

  #Initialize the RLS filter, then get a function closure
  #implementing 1-step prediction
  F = RLS(p = p_RLS, lmbda = lmbda)
  ff_fb = one_step_pred_setup(F)

  #Run it through the filter and get the error
  d_hat = np.array([0] + [ff_fb(di) for di in d])[:-1]
  err = (d - d_hat)

  plt.subplot(2,1,1)
  plt.plot(range(N), d, linewidth = 2, linestyle = ':',
           label = 'True Process')
  plt.plot(range(N), d_hat, linewidth = 2, label = 'Prediction')
  plt.legend()
  plt.xlabel('$n$')
  plt.ylabel('Process Value')
  plt.title('RLS tracking a process: ' \
            '$\\lambda = %s$, $p = %d$' % (lmbda, p_RLS))

  plt.subplot(2,1,2)
  plt.plot(range(N), err, linewidth = 2)
  plt.xlabel('$n$')
  plt.ylabel('Error')
  plt.title('Prediction Error')

  plt.show()

  F = RLS(p = p_RLS, lmbda = lmbda)
  ff_fb = system_identification_setup(F)
  w_hat = np.array([ff_fb(di) for di in d])

  #A(tau, t) returns -a[tau + 1], so these are the targets for w
  a1 = np.array([A(0, ti) for ti in t])
  a2 = np.array([A(1, ti) for ti in t])
  a3 = np.array([A(2, ti) for ti in t])
  plt.plot(t, a1, linestyle = '--', label = '$-a[1]$')
  plt.plot(t, a2, linestyle = '--', label = '$-a[2]$')
  plt.plot(t, a3, linestyle = '--', label = '$-a[3]$')
  plt.plot(t, w_hat[:, 0], label = '$w[0]$')
  plt.plot(t, w_hat[:, 1], label = '$w[1]$')
  plt.plot(t, w_hat[:, 2], label = '$w[2]$')
  plt.plot(t, w_hat[:, 3], label = '$w[3]$')
  plt.legend()
  plt.title('RLS Tracking ARMA Process, misspecified $p$')
  plt.show()
  return
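The exponentially weighted RLS recursion behind the `RLS(p, lmbda)` filter keeps a running estimate P(n) of the inverse autocorrelation matrix and discounts old data by the forgetting factor λ, which is what lets it track the time-varying poles above. A compact 1-step-prediction version is sketched below; it is an independent illustration with an assumed initialization P(0) = δI, not the project's RLS class.

import numpy as np

def rls_one_step_pred(d, lmbda=0.98, p=4, delta=100.0):
  '''
  Exponentially weighted RLS 1-step predictor (sketch).
  Returns d_hat, where d_hat[n] is the prediction of d[n]
  made from d[n - 1], ..., d[n - p].
  '''
  w = np.zeros(p)
  P = delta*np.eye(p)              #Inverse autocorrelation estimate
  x = np.zeros(p)                  #Regressor: newest sample first
  d_hat = np.zeros(len(d))
  for n, dn in enumerate(d):
    d_hat[n] = w @ x               #A priori prediction of d[n]
    e = dn - d_hat[n]              #A priori error
    k = P @ x/(lmbda + x @ P @ x)  #Gain vector
    w = w + k*e                    #Weight update
    P = (P - np.outer(k, x @ P))/lmbda  #Inversion-lemma update of P
    x = np.roll(x, 1)
    x[0] = dn                      #Shift in the newest sample
  return d_hat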