def inference(self, input_, orders=None, *args, **kwargs):
  # Sanity check
  if not isinstance(input_, Signal):
    raise TypeError('!! Input must be an instance of Signal')
  if self.order_lock is not None:
    orders = self.order_lock
  if orders is not None:
    orders = self._check_orders(orders)

  # Calculate
  y = np.zeros_like(input_)
  if orders is None:
    pool = self.indices_full
  else:
    pool = []
    for order in orders:
      pool += list(self.kernels.get_homogeneous_indices(
        order, self.memory_depth[order - 1], symmetric=False))

  for lags in pool:
    # lags = (\tau_1, \tau_2, \cdots, \tau_k)
    # prod = h_k(\tau_1, \cdots, \tau_k) * \prod_{i=1}^k x[n-\tau_i]
    prod = self.kernels[lags]
    if prod == 0:
      continue
    for lag in lags:
      prod *= self._delay(input_, lag)
    y += prod

  output = Signal(y)
  output.__array_finalize__(input_)
  return output
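# The inference and response routines above and below rely on self._delay,
# whose implementation is not shown in this section. A minimal sketch of the
# assumed behaviour (a zero-padded shift producing x[n - \tau]) is given
# below; the name and signature are taken from the calls above, the body is
# an assumption.
def _delay(self, x, lag):
  """Hypothetical sketch: return x shifted right by `lag` samples,
  i.e. y[n] = x[n - lag], with the first `lag` samples zero-padded."""
  assert lag >= 0
  y = np.zeros_like(x)
  if lag == 0:
    y[:] = x
  else:
    y[lag:] = x[:-lag]
  return y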
def inference(self, input_, **kwargs):
  if not self.nn.built:
    raise AssertionError('!! Model has not been built yet')
  mlp_input = self._gen_mlp_input(input_)
  tfinput = TFData(mlp_input)
  output = self.nn.predict(tfinput, **kwargs).flatten()
  output = Signal(output)
  output.__array_finalize__(input_)
  return output
def inference(self, input_, orders=None, *args, **kwargs):
  # Sanity check
  if not isinstance(input_, Signal):
    raise TypeError('!! Input must be an instance of Signal')

  # If orders is given, it is interpreted as a single degree n;
  # otherwise all Wiener operators G_0, ..., G_degree are summed
  y = np.zeros_like(input_)
  orders = range(self.degree + 1) if orders is None else [orders]
  for n in orders:
    y += self.G_n(n, input_)

  output = Signal(y)
  output.__array_finalize__(input_)
  return output
def G_n(self, n, x):
  # Sanity check
  if n < 0:
    raise ValueError(
      '!! degree of any Wiener operator must be non-negative')

  if n == 0:
    y_n = self.kernels[()] * np.ones_like(x)
  else:
    y_n = np.zeros_like(x)
    for i in range(n // 2 + 1):
      y_n += self.G_n_i(n, i, x)

  output = Signal(y_n)
  output.__array_finalize__(x)
  return output
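# For reference, G_n and G_n_i appear to follow the standard discrete-time
# Wiener G-functional expansion for a white Gaussian input:
#   G_n[k_n; x](t) = \sum_{i=0}^{\lfloor n/2 \rfloor} c(n, i)
#     \sum_{\tau_1, \cdots, \tau_{n-2i}} \sum_{\sigma_1, \cdots, \sigma_i}
#       k_n(\tau_1, \cdots, \tau_{n-2i}, \sigma_1, \sigma_1, \cdots,
#           \sigma_i, \sigma_i) \prod_{j=1}^{n-2i} x(t - \tau_j)
# where each of the i \sigma lags is repeated in the kernel index and only
# the n - 2i \tau lags delay the input, matching the construction of
# `lags` and `x_lags` in G_n_i below.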
def response(self, input_, **kwargs):
  # Calculate
  y = np.zeros_like(input_)
  pool = self.kernels.params.keys()
  for lags in pool:
    assert isinstance(lags, tuple)
    # lags = (\tau_1, \tau_2, \cdots, \tau_k)
    # prod = h_k(\tau_1, \cdots, \tau_k) * \prod_{i=1}^k x[n-\tau_i]
    prod = self.kernels[lags]
    for lag in lags:
      prod *= self._delay(input_, lag)
    y += prod

  output = Signal(y)
  output.__array_finalize__(input_)
  return output
def inference(self, input_, orders=None, *args, **kwargs):
  # Sanity check
  if not isinstance(input_, Signal):
    raise TypeError('!! Input must be an instance of Signal')

  # Update Phi
  self._update_Phi_naive(input_)

  # Calculate output
  y = self.coefs[()] * np.ones_like(input_)
  pool = (self.coefs.get_indices(symmetric=False) if orders is None
          else self.coefs.get_homogeneous_indices(
            orders, self.memory_depth[orders - 1], symmetric=False))
  for indices in pool:
    y_ = self.coefs[indices] * np.ones_like(input_)
    for index in indices:
      y_ *= self.Phi[index]
    y += y_

  output = Signal(y)
  output.__array_finalize__(input_)
  return output
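# The loop above evaluates the expansion
#   y[n] = c_{()} + \sum_{(i_1, \cdots, i_k)} c_{(i_1, \cdots, i_k)}
#            \prod_{j=1}^{k} \Phi_{i_j}[n]
# where \Phi_{i} denotes the signal stored in self.Phi[i], filled in by
# _update_Phi_naive (implementation not shown in this section).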
def G_n_i(self, n, i, x):
  y_i = np.zeros_like(x)
  # multiplicity is n - i
  indices_pool = self.kernels.get_homogeneous_indices(
    n - i, self.memory_depth[n - i - 1], symmetric=False)
  for indices in indices_pool:
    assert isinstance(indices, (list, tuple))
    # Determine indices: the last i lags (\sigma's) are repeated in the
    # order-n kernel index, while only the first n - 2i lags (\tau's)
    # delay the input
    lags = indices + indices[n - 2 * i:n - i]
    x_lags = indices[:n - 2 * i]
    prod = self.kernels[lags]
    if prod == 0:
      continue
    for lag in x_lags:
      prod *= self._delay(x, lag)
    y_i += prod

  output = Signal(y_i * self._get_coef(n, i))
  output.__array_finalize__(x)
  return output
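# _get_coef(n, i) is not shown in this section. A sketch of the classical
# Wiener coefficient it presumably computes is given below, assuming the
# input is white Gaussian noise with power A (self.A here is a hypothetical
# attribute):
#   c(n, i) = \frac{(-1)^i \, n! \, A^i}{(n - 2i)! \, i! \, 2^i}
def _get_coef(self, n, i):
  """Hypothetical sketch of the i-th coefficient of the n-th Wiener
  operator; `self.A` (input power) is an assumed attribute."""
  from math import factorial
  return ((-1) ** i * factorial(n) * self.A ** i /
          (factorial(n - 2 * i) * factorial(i) * 2 ** i))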