def dim_reduction(self, slash_filter):
    """Standardise and PCA-project ``self.data`` down to 2 dimensions,
    then stream one (name, x, y) point per entry to the OSC client on
    the '/data' address.

    Side effects: overwrites ``self.data`` with the transformed array and
    sets ``self.pca_data``, ``self.pca_data_transposed`` and
    ``self.pca_data_transposed_aslist``.
    """
    # Standardise each feature (zero mean, unit variance) before PCA.
    standardiser = StandardScaler()
    standardiser.fit(self.data)
    self.data = standardiser.transform(self.data)

    # Project the standardised data onto the first two principal components.
    projector = decomposition.PCA(n_components=2)
    projector.fit(self.data)
    self.data = projector.transform(self.data)

    # Renormalise so the projected values sit nicely on a grid.
    self.pca_data = norm_np(self.data)
    self.pca_data_transposed = self.pca_data.transpose()
    self.pca_data_transposed_aslist = self.pca_data_transposed.tolist()

    # Transposed layout: row 0 holds all x values, row 1 all y values.
    xs = self.pca_data_transposed_aslist[0]
    ys = self.pca_data_transposed_aslist[1]
    for x_val, y_val, name in zip(xs, ys, self.keys):
        self.client.send_message('/data', [name, x_val, y_val])
# ---- Dimensionality reduction: standardise, PCA to 2D, normalise ----
printp('Starting PCA')
data = np.array(list(raw_data))

# Fit a standard scaler (per-feature mean and stddev), then transform
# the original data set with it.
scaler = StandardScaler()
scaler.fit(data)
data = scaler.transform(data)

# Project the standardised data onto the first two principal components.
pca = decomposition.PCA(n_components=2)
pca.fit(data)
data = pca.transform(data)

# NOTE(review): the original also built an unnormalised transposed copy
# (`data_transposed`) that was never read again — dead code, removed.

# Normalise so the 2D coordinates sit in a predictable range.
data = norm_np(data)

# Map each key (minus its last 5 characters — presumably a file
# extension; TODO confirm against how `keys` is built) to its [x, y]
# point and persist as JSON.
out_dict = {key[:-5]: list(value) for key, value in zip(keys, data)}
write_json(os.path.join(parent, 'sketches', 'livedb_pca.json'), out_dict)

# Write the PCA coordinates out as a 2-channel wav file at 44.1 kHz.
printp('Writing data to output files')
data = data.astype('float32')
wavfile.write(os.path.join(parent, 'sketches', 'livedb_pca.wav'), 44100, data)
printp('Done')
def normalise_cols(self, slash_filter):
    """Normalise the currently loaded ``self.data`` in place via ``norm_np``.

    Posts a status message to the Max client before normalising; if the
    operation fails (e.g. no data has been loaded yet) the failure is
    reported to the client instead of raised.
    """
    try:
        max_post('Normalising data.', self.client)
        self.data = norm_np(self.data)
    except Exception:
        # Was a bare `except:`, which also swallows KeyboardInterrupt and
        # SystemExit; `Exception` keeps the deliberate best-effort
        # reporting without hiding interpreter-level signals.
        max_post('There is no data currently loaded.', self.client)