def online_pipe(self, data: NDArray) -> NDArray:
    """Turn one raw EEG window into a single feature vector.

    :param data: ndarray with the shape (n_channels, n_samples)
    :return: ndarray with the shape of (1, n_features)
    """
    # MNE's filtering routines require float64 input.
    filtered = mne.filter.filter_data(
        data.astype(np.float64),
        l_freq=8, h_freq=30,
        sfreq=self.eeg.sfreq,
        verbose=False,
    )

    # Spatial filtering: surface Laplacian over the board's channel layout.
    referenced = self.eeg.laplacian(filtered, self.eeg.get_board_names())

    # Z-score each channel. StandardScaler normalizes columns, so transpose
    # to (n_samples, n_channels) and back.
    normalized = StandardScaler().fit_transform(referenced.T).T

    # Extract band-power and variance features from the single epoch.
    params = {'pow_freq_bands__freq_bands': np.array([8, 10, 12.5, 30])}
    feature_names = ['pow_freq_bands', 'variance']
    features = extract_features(
        normalized[np.newaxis], self.eeg.sfreq, feature_names, params
    )[0]
    return features
def __call__(self, x: NDArray) -> NDArray:
    """Evaluate randomized IR (i.e. its random realization) at given points.

    Args:
        x (NDArray): query points for IR. If x is 1D, single realization of
            RIR is evaluated; if x is 2D, signature is (N_query, N_batch):
            for each row new realization is generated

    Returns:
        NDArray: realization of randomized IR, shape (N_query, N_batch)

    Raises:
        ValueError: if ``self.base_ir_type`` is neither ``'frozen'`` nor
            ``'generated'``.
    """
    # Promote a 1D query to a single-column batch.
    if x.ndim == 1:
        x = x.reshape((x.size, 1))
    N_query, N_batch = x.shape

    # Per-batch multiplicative factors; default to ones when no generator.
    if self.factor_generator is not None:
        factors = self.factor_generator(N_batch).reshape((1, N_batch))
    else:
        factors = np.ones((1, N_batch))

    if self.base_ir_type == 'frozen':
        # One shared realization evaluated at every query column.
        realization_interp = self._interp_realization(self.base_ir_frozen)
        y = realization_interp(x).reshape((N_query, N_batch))
    elif self.base_ir_type == 'generated':
        # A fresh realization per batch column.
        y = np.zeros((N_query, N_batch))
        for i_batch, x_query in enumerate(x.T):
            realization_interp = self._interp_realization(self.base_ir_generator())
            y[:, i_batch] = realization_interp(x_query)
    else:
        # BUGFIX: previously an unrecognized type fell through and the final
        # `factors * y` raised an opaque UnboundLocalError on `y`.
        raise ValueError(f"Unknown base_ir_type: {self.base_ir_type!r}")

    return factors * y
def test_type_of(self):
    """Each sample array must be an instance of the type reported by NDArray.type_of."""
    samples = (
        np.array([1, 2, 3]),
        np.array([1, 2, '3']),
        np.array([1, 2, 3.0]),
        np.array([1, 2, {}]),
        np.array([True, True, True]),
    )
    for sample in samples:
        reported_type = NDArray.type_of(sample)
        self.assertIsInstance(sample, reported_type)
def encode_state(self, image: NDArray): """ Function for converting the image got from the environment to its feature-representation Args: image: Image obtained from the environment Returns: Feature-state representation of the image given """ in_image = image.reshape((1, ) + image.shape) return self.features(in_image)
def __init__(self, obs: NDArray, n_outputs: int, kernel=(3, 3), padding='same', stride=2, nFilters=32):
    """Build the inverse model: a 4-layer conv feature extractor plus dense head.

    :param obs: a sample observation; its shape defines the model's input shape
        and it is run once to initialize ``self.prevState``
    :param n_outputs: size of the softmax output layer
    :param kernel: conv kernel size for all four conv layers
    :param padding: conv padding mode shared by all conv layers
    :param stride: conv stride shared by all conv layers
    :param nFilters: number of filters in each conv layer
    """
    # NOTE(review): super().__init__ is called twice — once here and once at the
    # end with the functional (inputs, outputs) signature. The first call lets
    # the layer attributes below be tracked while the graph is assembled;
    # confirm this double-init pattern is intentional.
    super(InvModel, self).__init__()
    inp_layer = tf.keras.Input(shape=obs.shape)
    self.conv1 = tf.keras.layers.Conv2D(nFilters, kernel, strides=stride, padding=padding, activation='elu')
    self.conv2 = tf.keras.layers.Conv2D(nFilters, kernel, strides=stride, padding=padding, activation='elu')
    self.conv3 = tf.keras.layers.Conv2D(nFilters, kernel, strides=stride, padding=padding, activation='elu')
    self.conv4 = tf.keras.layers.Conv2D(nFilters, kernel, strides=stride, padding=padding, activation='elu')
    self.flat1 = tf.keras.layers.Flatten()
    self.concat = tf.keras.layers.Concatenate(axis=1)
    self.dense1 = tf.keras.layers.Dense(256, activation='elu')
    self.dense2 = tf.keras.layers.Dense(n_outputs, activation='softmax')
    # Flattened output of the conv stack; exposed as a standalone sub-model so
    # features can be computed without the dense head.
    self.features_out = self.flat1(
        self.conv4(self.conv3(self.conv2(self.conv1(inp_layer)))))
    self.features = tf.keras.Model(inputs=[inp_layer], outputs=[self.features_out], name='Inverse Model')
    # Seed prevState with the features of the sample observation (batch of 1).
    state = self.features(obs.reshape((1, ) + obs.shape))
    self.prevState = tf.convert_to_tensor(state, dtype=tf.float32)
    # Wire the full forward pass, then re-initialize as a functional model so
    # Keras knows the complete inputs -> outputs graph.
    self.out = self.call(inp_layer)
    super(InvModel, self).__init__(inputs=inp_layer, outputs=self.out)
def online_predict(self, data: NDArray, eeg: EEG):
    """Band-pass filter one raw EEG window and return the classifier's label.

    :param data: ndarray of raw EEG samples, (n_channels, n_samples)
    :param eeg: EEG object supplying the sampling frequency
    :return: predicted label for this window
    """
    # MNE's filter requires float64 input.
    window = data.astype(np.float64)

    # Band-pass 8-30 Hz.
    window = mne.filter.filter_data(
        window, l_freq=8, h_freq=30, sfreq=eeg.sfreq, verbose=False
    )

    # Classify the single epoch (leading epoch axis added, first result taken).
    return self.clf.predict(window[np.newaxis])[0]
def concat_vectors_as_cols(v1: NDArray, v2: NDArray) -> NDArray: n = v1.size return np.concatenate((v1.reshape(n, 1), v2.reshape(n, 1)), axis=1)
def test_instantiate(self):
    """Directly constructing NDArray must raise a TypeError naming the class."""
    with self.assertRaises(TypeError) as ctx:
        NDArray([1, 2, 3])
    message = str(ctx.exception)
    self.assertIn('NDArray', message)