def test_match_flatten(self):
    """flatten(return_index=True): every returned coordinate tuple maps back
    to the matching value in the original (non-batch) axes."""
    data = np.random.rand(10, 256, 6, 6)
    flat, idx = flatten(data, return_index=True)
    n_features = np.prod(data.shape[1:])
    for sample in range(data.shape[0]):
        for pos in range(n_features):
            # idx[pos] is the coordinate (within one sample) of flat[sample][pos]
            assert data[sample][tuple(idx[pos])] == flat[sample][pos]
def init_and_progress(layer, activations):
    """Fit (or skip) a PCA for one layer's activations and tick the progress bar.

    Closure over ``n_components``, ``progress``, ``PCA``, ``flatten`` and
    ``self._logger`` from the enclosing scope.

    :param layer: layer identifier, used for logging only
    :param activations: per-layer activations; flattened to 2-D before fitting
    :return: a fitted ``PCA`` instance, or ``None`` when the flattened
        activations already have at most ``n_components`` features
    """
    activations = flatten(activations)
    if activations.shape[1] <= n_components:
        # Lazy %-style args: the message is only formatted when DEBUG is enabled,
        # instead of eagerly building an f-string on every call.
        self._logger.debug(
            "Not computing principal components for %s activations %s "
            "as shape is small enough already", layer, activations.shape)
        pca = None
    else:
        # random_state pinned for reproducible components across runs
        pca = PCA(n_components=n_components, random_state=0)
        pca.fit(activations)
    progress.update(1)
    return pca
def apply_pca(layer, activations):
    """Project a layer's flattened activations through its stored PCA.

    Closure over ``self._layer_pcas`` and ``flatten`` from the enclosing scope.
    """
    fitted = self._layer_pcas[layer]
    flat = flatten(activations)
    # A None entry means the layer was small enough that PCA was skipped.
    return flat if fitted is None else fitted.transform(flat)
def test_inverse(self):
    """Reshaping the flattened output recovers the original array exactly."""
    original = np.random.rand(2560, 256, 6, 6)
    flat = flatten(original)
    restored = np.reshape(flat, (flat.shape[0], 256, 6, 6))
    assert restored.shape == original.shape
    assert (restored == original).all()
def test_indices_shape(self):
    """return_index yields one coordinate row per flattened (non-batch) element."""
    data = np.random.rand(2560, 256, 6, 6)
    _, idx = flatten(data, return_index=True)
    assert len(idx.shape) == 2
    # one row for every element of the collapsed 256*6*6 feature axes
    assert idx.shape[0] == np.prod(data.shape[1:])
    assert idx.shape[1] == 3  # three coordinates, one per axis of (256, 6, 6)
def test_flattened_shape(self):
    """flatten keeps the batch axis and collapses all trailing axes into one."""
    data = np.random.rand(2560, 256, 6, 6)
    flat = flatten(data)
    assert np.prod(flat.shape) == np.prod(data.shape)  # no elements gained or lost
    assert flat.shape[0] == data.shape[0]              # batch axis untouched
    assert len(flat.shape) == 2                        # result is 2-D